ggml-vulkan.cpp
#include "ggml-vulkan.h"

#include <vulkan/vulkan_core.h>
#ifdef GGML_VULKAN_RUN_TESTS
#include <chrono>
#endif

#include <vulkan/vulkan.hpp>

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <tuple>
#include <vector>
#include <sstream>
#include <utility>
#include <memory>
#include <limits>
#include <map>
#include <mutex>

#include "ggml.h"
#include "ggml-backend-impl.h"

#include "ggml-vulkan-shaders.hpp"

#define VK_API_VERSION VK_API_VERSION_1_2

#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))
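// Integer ceiling division, e.g. CEIL_DIV(10, 4) == 3; used throughout to turn
// element counts into dispatch/workgroup counts.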
#define VK_VENDOR_ID_AMD 0x1002
#define VK_VENDOR_ID_APPLE 0x106b
#define VK_VENDOR_ID_INTEL 0x8086
#define VK_VENDOR_ID_NVIDIA 0x10de

#define VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN 0
#define VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI 1
#define VK_DEVICE_DESCRIPTOR_POOL_MODE_SINGLE 2

#define GGML_VK_MAX_NODES 8192

#define MAX_VK_BUFFERS 256

#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 1
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif

#define VK_CHECK(err, msg)                                          \
    do {                                                            \
        vk::Result err_ = (err);                                    \
        if (err_ != vk::Result::eSuccess) {                         \
            fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n",  \
                #err, to_string(err_).c_str(), __FILE__, __LINE__); \
            exit(1);                                                \
        }                                                           \
    } while (0)
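// Usage sketch (illustrative): wrap any expression yielding a vk::Result, e.g.
//   VK_CHECK(device.waitForFences({ fence }, true, UINT64_MAX), "wait failed");
// Note that the msg argument is currently unused; the stringified expression is
// printed instead, and any failure aborts the process.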
#ifdef GGML_VULKAN_DEBUG
#define VK_LOG_DEBUG(msg) std::cerr << msg << std::endl
#else
#define VK_LOG_DEBUG(msg) ((void) 0)
#endif // GGML_VULKAN_DEBUG

struct ggml_backend_vk_context;

struct vk_queue {
    uint32_t queue_family_index;
    vk::Queue queue;
    vk::CommandPool pool;
    uint32_t cmd_buffer_idx;
    std::vector<vk::CommandBuffer> cmd_buffers;

    vk::PipelineStageFlags stage_flags;
};

struct vk_pipeline_struct {
    std::string name;
    vk::ShaderModule shader_module;
    vk::DescriptorSetLayout dsl;
    std::vector<vk::DescriptorPool> descriptor_pools;
    std::vector<vk::DescriptorSet> descriptor_sets;
    uint32_t descriptor_set_idx;
    vk::PipelineLayout layout;
    vk::Pipeline pipeline;
    uint32_t push_constant_size;
    uint32_t parameter_count;
    std::array<uint32_t, 3> wg_denoms;
    uint32_t align;
};

typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;

static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);

struct vk_matmul_pipeline_struct {
    vk_pipeline l, m, s;
    vk_pipeline a_l, a_m, a_s;
};

typedef std::shared_ptr<vk_matmul_pipeline_struct> vk_matmul_pipeline;

struct vk_device_struct;
typedef std::shared_ptr<vk_device_struct> vk_device;
typedef std::weak_ptr<vk_device_struct> vk_device_ref;

struct vk_buffer_struct;
typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;

struct ggml_backend_vk_buffer_type_context {
    std::string name;
    vk_device device;
};

GGML_CALL static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft);
GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft);
GGML_CALL static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft);
GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor);

static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_vk_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_vk_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_vk_buffer_type_get_alignment,
    /* .get_max_size   = */ ggml_backend_vk_buffer_type_get_max_size,
    /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
class vk_memory_logger;
#endif
static void ggml_vk_destroy_buffer(vk_buffer& buf);

struct vk_device_struct {
    std::mutex mutex;

    vk::PhysicalDevice physical_device;
    vk::PhysicalDeviceProperties properties;
    std::string name;
    uint64_t max_memory_allocation_size;
    bool fp16;
    vk::Device device;
    uint32_t vendor_id;
    vk_queue compute_queue;
    vk_queue transfer_queue;
    bool single_queue;
    uint32_t descriptor_set_mode;
    uint32_t subgroup_size;
    bool uma;
    size_t idx;

    vk_matmul_pipeline pipeline_matmul_f32;
    vk_matmul_pipeline pipeline_matmul_f32_f16;
    vk_matmul_pipeline pipeline_matmul_f16;
    vk_matmul_pipeline pipeline_matmul_f16_f32;
    vk_pipeline pipeline_matmul_split_k_reduce;

    vk_matmul_pipeline pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT];

    vk_matmul_pipeline pipeline_matmul_id_f32;
    vk_matmul_pipeline pipeline_matmul_id_f16;
    vk_matmul_pipeline pipeline_matmul_id_f16_f32;

    vk_matmul_pipeline pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT];

    vk_pipeline pipeline_dequant[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT];

    vk_pipeline pipeline_mul_mat_vec_p021_f16_f32;
    vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
    vk_pipeline pipeline_get_rows[GGML_TYPE_COUNT];
    vk_pipeline pipeline_get_rows_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_add_f32, pipeline_add_f16_f32_f16;
    vk_pipeline pipeline_mul_f32;
    vk_pipeline pipeline_div_f32;
    vk_pipeline pipeline_concat_f32, pipeline_concat_f16, pipeline_concat_i32;
    vk_pipeline pipeline_upscale_f32;
    vk_pipeline pipeline_scale_f32;
    vk_pipeline pipeline_sqr_f32;
    vk_pipeline pipeline_clamp_f32;
    vk_pipeline pipeline_pad_f32;
    vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
    vk_pipeline pipeline_norm_f32;
    vk_pipeline pipeline_group_norm_f32;
    vk_pipeline pipeline_rms_norm_f32;
    vk_pipeline pipeline_gelu_f32;
    vk_pipeline pipeline_gelu_quick_f32;
    vk_pipeline pipeline_silu_f32;
    vk_pipeline pipeline_relu_f32;
    vk_pipeline pipeline_leaky_relu_f32;
    vk_pipeline pipeline_tanh_f32;
    vk_pipeline pipeline_diag_mask_inf_f32;
    vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
    vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16;
    vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
    vk_pipeline pipeline_argsort_f32;
    vk_pipeline pipeline_sum_rows_f32;
    vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16;
    vk_pipeline pipeline_timestep_embedding_f32;

    std::vector<vk_pipeline_ref> pipelines;

    std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;

    vk::Fence fence;
    vk_buffer sync_staging;

    ggml_backend_buffer_type buffer_type;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    std::unique_ptr<vk_memory_logger> memory_logger;
#endif

    ~vk_device_struct() {
        VK_LOG_DEBUG("destroy device " << name);

        device.destroyFence(fence);

        ggml_vk_destroy_buffer(sync_staging);

        device.destroyCommandPool(compute_queue.pool);
        if (!single_queue) {
            device.destroyCommandPool(transfer_queue.pool);
        }

        for (auto& pipeline : pipelines) {
            if (pipeline.expired()) {
                continue;
            }

            vk_pipeline pl = pipeline.lock();
            ggml_vk_destroy_pipeline(device, pl);
        }
        pipelines.clear();

        device.destroy();
    }
};

struct vk_buffer_struct {
    vk::Buffer buffer = VK_NULL_HANDLE;
    vk::DeviceMemory device_memory = VK_NULL_HANDLE;
    vk::MemoryPropertyFlags memory_property_flags;
    void * ptr;
    size_t size = 0;

    vk_device device;

    ~vk_buffer_struct() {
        if (size == 0) {
            return;
        }
        VK_LOG_DEBUG("~vk_buffer_struct(" << buffer << ", " << size << ")");

        device->device.freeMemory(device_memory);
        device->device.destroyBuffer(buffer);
    }
};

struct vk_subbuffer {
    vk_buffer buffer;
    uint64_t offset;
    uint64_t size;

    operator vk::DescriptorBufferInfo() const {
        return { buffer->buffer, offset, size };
    }
};
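// The conversion operator above lets a vk_subbuffer be passed directly wherever the
// Vulkan-Hpp API expects a vk::DescriptorBufferInfo when updating descriptor sets,
// e.g. (illustrative, `subbuf` is a hypothetical variable):
//   vk::DescriptorBufferInfo info = subbuf;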
struct vk_semaphore {
    vk::Semaphore s;
    uint64_t value;
};

struct vk_submission {
    vk::CommandBuffer buffer;
    std::vector<vk_semaphore> wait_semaphores;
    std::vector<vk_semaphore> signal_semaphores;
};

typedef std::vector<vk_submission> vk_sequence;

struct vk_mat_mat_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t k_split;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
};
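// Each of these push-constant structs must match, field for field, the push-constant
// block declared in the corresponding compute shader; a layout mismatch would silently
// feed the shader wrong parameters.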
struct vk_mat_vec_push_constants {
    uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
};

struct vk_mat_mat_id_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11;
};

struct vk_mat_vec_id_push_constants {
    uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t ne11;
};

struct vk_op_push_constants {
    uint32_t KX;
    uint32_t KY;
    float param1;
    float param2;
};

struct vk_op_unary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t d_offset;
    float param1; float param2;
};

struct vk_op_binary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23;
    uint32_t d_offset;
    float param1; float param2; int32_t param3;
};

struct vk_op_diag_mask_push_constants {
    uint32_t ncols;
    uint32_t rows_per_channel;
    int32_t n_past;
};

struct vk_op_rope_push_constants {
    uint32_t ncols;
    uint32_t n_dims;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[2];
    float theta_scale;
    uint32_t has_ff;
};

struct vk_op_soft_max_push_constants {
    uint32_t KX;
    uint32_t KY;
    float scale;
    float max_bias;
    float m0;
    float m1;
    uint32_t n_head_log2;
};

struct vk_op_argsort_push_constants {
    uint32_t ncols;
    uint32_t ncols_pad;
    int32_t order;
};

struct vk_op_im2col_push_constants {
    uint32_t batch_offset; uint32_t offset_delta;
    uint32_t IC;
    uint32_t IW; uint32_t IH;
    uint32_t OW; uint32_t OH;
    uint32_t KW; uint32_t KH;
    uint32_t pelements;
    uint32_t CHW;
    int32_t s0; int32_t s1;
    int32_t p0; int32_t p1;
    int32_t d0; int32_t d1;
};

struct vk_op_timestep_embedding_push_constants {
    uint32_t nb1;
    uint32_t dim;
    uint32_t max_period;
};

// Allow pre-recording command buffers
struct vk_staging_memcpy {
    vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}

    void * dst;
    const void * src;
    size_t n;
};
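// Because command buffers can be recorded before the source data is in place, host-side
// copies are queued as vk_staging_memcpy entries (see in_memcpys/out_memcpys in
// vk_context_struct below) and replayed around submission instead of being executed
// immediately.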
struct vk_op_upscale_push_constants {
    uint32_t ne; uint32_t d_offset;
    uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13;
    float sf0; float sf1; float sf2; float sf3;
};

struct vk_context_struct {
    vk_submission * s;
    std::vector<vk_sequence> seqs;

    int exit_tensor_idx;

    std::vector<vk_staging_memcpy> in_memcpys;
    std::vector<vk_staging_memcpy> out_memcpys;

    vk_queue * q;
};
typedef std::shared_ptr<vk_context_struct> vk_context;
typedef std::weak_ptr<vk_context_struct> vk_context_ref;

struct ggml_tensor_extra_gpu {
    vk_buffer_ref buffer_gpu;
    uint64_t offset;

    void reset() {
        buffer_gpu.reset();
        offset = 0;
    }
};

struct ggml_vk_garbage_collector {
    std::vector<vk_semaphore> tl_semaphores;
    std::vector<vk_semaphore> semaphores;
    std::vector<vk::Event> events;
    std::vector<vk_buffer> temp_buffers;
    std::vector<vk_context> contexts;
};

#if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
#define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl

static std::string format_size(size_t size) {
    const size_t kib = 1024;
    const size_t mib = kib * 1024;
    const size_t gib = mib * 1024;

    std::ostringstream oss;
    oss << std::fixed << std::setprecision(2);

    if (size >= gib) {
        oss << static_cast<double>(size) / gib << " GiB";
    } else if (size >= mib) {
        oss << static_cast<double>(size) / mib << " MiB";
    } else if (size >= kib) {
        oss << static_cast<double>(size) / kib << " KiB";
    } else {
        oss << size << " B";
    }

    return oss.str();
}
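// e.g. format_size(1536 * 1024) yields "1.50 MiB"; sizes below 1 KiB print as raw bytes.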
static std::mutex log_mutex;

class vk_memory_logger {
public:
    vk_memory_logger(): total_device(0), total_host(0) {}
    void log_allocation(vk_buffer_ref buf_ref, size_t size);
    void log_deallocation(vk_buffer_ref buf_ref);

private:
    std::map<vk::Buffer, size_t> allocations; // Track allocations
    size_t total_device;
    size_t total_host;
};
#else
#define VK_LOG_MEMORY(msg) ((void) 0)
#endif // GGML_VULKAN_MEMORY_DEBUG

struct ggml_backend_vk_context {
    std::string name;

    vk_device device;

    size_t semaphore_idx, event_idx;
    ggml_vk_garbage_collector gc;
    size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k;
    vk_buffer prealloc_x, prealloc_y, prealloc_split_k;
    vk::Fence fence;
    vk_buffer staging;
    size_t staging_size;
    size_t staging_offset;

    vk_buffer buffer_pool[MAX_VK_BUFFERS];

    vk_context_ref compute_ctx;
    vk_context_ref transfer_ctx;

    std::vector<vk_context_ref> tensor_ctxs;
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    const std::string type = device ? "device" : "host";
    allocations[buf->buffer] = size;
    total_device += device ? size : 0;
    total_host += device ? 0 : size;
    VK_LOG_MEMORY(buf->device->name << ": +" << format_size(size) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
}
void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
    if (buf_ref.expired() || buf_ref.lock()->size == 0) {
        return;
    }

    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    std::string type = device ? "device" : "host";
    auto it = allocations.find(buf->buffer);
    if (it != allocations.end()) {
        // Only adjust the totals once the entry is known to exist; dereferencing a
        // missing iterator would be undefined behavior.
        total_device -= device ? it->second : 0;
        total_host -= device ? 0 : it->second;
        VK_LOG_MEMORY(buf->device->name << ": -" << format_size(it->second) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
        allocations.erase(it);
    } else {
        VK_LOG_MEMORY("ERROR " << buf->device->name << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
    }
}
#endif // GGML_VULKAN_MEMORY_DEBUG
struct vk_instance_t {
    vk::Instance instance;

    std::vector<size_t> device_indices;

    vk_device devices[GGML_VK_MAX_DEVICES];
};

static bool vk_instance_initialized = false;
static vk_instance_t vk_instance;

#ifdef GGML_VULKAN_CHECK_RESULTS
static size_t vk_skip_checks;
static size_t vk_output_tensor;

static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name);
static void ggml_vk_check_results_0(ggml_tensor * tensor);
static void ggml_vk_check_results_1(ggml_tensor * tensor);
#endif

typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

GGML_CALL static void ggml_backend_vk_free(ggml_backend_t backend);

static void ggml_vk_create_pipeline(vk_device& device, vk_pipeline& pipeline, const std::string& name, size_t spv_size, const void* spv_data, const std::string& entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t>&& specialization_constants, uint32_t align) {
    VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << ")");
    GGML_ASSERT(parameter_count > 0);
    GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT

    std::lock_guard<std::mutex> guard(device->mutex);

    pipeline = std::make_shared<vk_pipeline_struct>();
    pipeline->name = name;
    pipeline->parameter_count = parameter_count;
    pipeline->push_constant_size = push_constant_size;
    pipeline->wg_denoms = wg_denoms;
    pipeline->align = align;

    vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
    pipeline->shader_module = device->device.createShaderModule(shader_module_create_info);

    std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
    std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
    for (uint32_t i = 0; i < parameter_count; i++) {
        dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
        dsl_binding_flags.push_back({});
    }

    vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };

    vk::PushConstantRange pcr(
        vk::ShaderStageFlagBits::eCompute,
        0,
        pipeline->push_constant_size
    );

    vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
        {},
        dsl_binding);
    descriptor_set_layout_create_info.setPNext(&dslbfci);
    pipeline->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info);
    // Check whether the device supports allocating multiple descriptor sets per pool
    if (device->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN) {
        const uint32_t alloc_count = 2;

        // Try allocating multiple sets from one pool.
        // This fails on AMD for some reason, so add a fallback that allocates one pool per set.
        vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, alloc_count, descriptor_pool_size);
        vk::DescriptorPool pool = device->device.createDescriptorPool(descriptor_pool_create_info);

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = pipeline->dsl;
        }
        try {
            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pool, alloc_count, layouts.data());
            std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
            // The probe succeeded, so multi-set pools are usable on this device
            device->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI;
        } catch(vk::OutOfPoolMemoryError const&) {
            device->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_SINGLE;
        }

        device->device.destroyDescriptorPool(pool);
    }
    if (device->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI) {
        vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, 128, descriptor_pool_size);
        pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
    }

    pipeline->descriptor_set_idx = 0;

    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), pipeline->dsl, pcr);
    pipeline->layout = device->device.createPipelineLayout(pipeline_layout_create_info);

    std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());

    for (size_t i = 0; i < specialization_constants.size(); i++) {
        specialization_entries[i].constantID = i;
        specialization_entries[i].offset = i * sizeof(uint32_t);
        specialization_entries[i].size = sizeof(uint32_t);
    }

    vk::SpecializationInfo specialization_info(
        specialization_entries.size(),
        specialization_entries.data(),
        specialization_constants.size() * sizeof(uint32_t),
        specialization_constants.data()
    );

    vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
        vk::PipelineShaderStageCreateFlags(),
        vk::ShaderStageFlagBits::eCompute,
        pipeline->shader_module,
        entrypoint.c_str(),
        &specialization_info);

    vk::ComputePipelineCreateInfo compute_pipeline_create_info(
        vk::PipelineCreateFlags(),
        pipeline_shader_create_info,
        pipeline->layout);
    pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;

    device->pipelines.push_back(pipeline);
}
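// Illustrative call only (the identifiers relu_f32_len/relu_f32_data are assumed here,
// not verified against this file): a shader compiled into ggml-vulkan-shaders.hpp would
// typically be registered once at device init, e.g.
//   ggml_vk_create_pipeline(device, device->pipeline_relu_f32, "relu_f32",
//       relu_f32_len, relu_f32_data, "main", 2, sizeof(vk_op_push_constants),
//       {512, 1, 1}, {}, 1);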
static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_vk_destroy_pipeline(" << pipeline->name << ")");
    for (auto& pool : pipeline->descriptor_pools) {
        device.destroyDescriptorPool(pool);
    }
    pipeline->descriptor_pools.clear();
    pipeline->descriptor_sets.clear();
    pipeline->descriptor_set_idx = 0;

    device.destroyDescriptorSetLayout(pipeline->dsl);

    device.destroyPipelineLayout(pipeline->layout);

    device.destroyShaderModule(pipeline->shader_module);

    device.destroyPipeline(pipeline->pipeline);
}
static void ggml_pipeline_allocate_descriptor_sets(vk_device& device, vk_pipeline& pipeline, uint32_t n) {
    VK_LOG_DEBUG("ggml_pipeline_allocate_descriptor_sets(" << pipeline->name << ", " << n << ")");
    if (pipeline->descriptor_sets.size() >= pipeline->descriptor_set_idx + n) {
        // Enough descriptors are available
        return;
    }

    std::lock_guard<std::mutex> guard(device->mutex);

    if (device->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI) {
        const uint32_t alloc_count = pipeline->descriptor_set_idx + n - pipeline->descriptor_sets.size();

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = pipeline->dsl;
        }
        vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[0], alloc_count, layouts.data());
        std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
        pipeline->descriptor_sets.insert(pipeline->descriptor_sets.end(), sets.begin(), sets.end());
    } else {
        for (uint32_t i = pipeline->descriptor_sets.size(); i < pipeline->descriptor_set_idx + n; i++) {
            vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count);
            vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, 1, descriptor_pool_size);
            pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));

            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[i], 1, &pipeline->dsl);
            std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
            pipeline->descriptor_sets.push_back(sets[0]);
        }
    }
}

static void ggml_pipeline_cleanup(vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_pipeline_cleanup(" << pipeline->name << ")");
    pipeline->descriptor_set_idx = 0;
}

static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");
    std::lock_guard<std::mutex> guard(device->mutex);

    if (q.cmd_buffers.size() > q.cmd_buffer_idx) {
        // Reuse command buffer
        return q.cmd_buffers[q.cmd_buffer_idx++];
    }

    vk::CommandBufferAllocateInfo command_buffer_alloc_info(
        q.pool,
        vk::CommandBufferLevel::ePrimary,
        1);
    const std::vector<vk::CommandBuffer> cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info);
    auto buf = cmd_buffers.front();

    q.cmd_buffers.push_back(buf);
    q.cmd_buffer_idx++;

    return buf;
}

static vk_submission ggml_vk_create_submission(vk_device& device, vk_queue& q, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    VK_LOG_DEBUG("ggml_vk_create_submission()");
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(device, q);
    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
    return s;
}
static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
    if (ctx->seqs.empty()) {
        return;
    }
    VK_LOG_DEBUG("ggml_vk_submit(" << ctx << ", " << fence << ")");

    std::vector<std::vector<uint64_t>> tl_wait_vals;
    std::vector<std::vector<uint64_t>> tl_signal_vals;
    std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
    std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
    std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
    std::vector<vk::SubmitInfo> submit_infos;
    int idx = -1;
    std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;

    size_t reserve = 0;
    for (const auto& sequence : ctx->seqs) {
        reserve += sequence.size();
    }

    // Pre-reserve vectors to prevent reallocation, which invalidates pointers
    tl_wait_semaphores.reserve(reserve);
    tl_wait_vals.reserve(reserve);
    tl_signal_semaphores.reserve(reserve);
    tl_signal_vals.reserve(reserve);
    tl_submit_infos.reserve(reserve);
    submit_infos.reserve(reserve);
    stage_flags.reserve(reserve);

    for (const auto& sequence : ctx->seqs) {
        for (const auto& submission : sequence) {
            stage_flags.push_back({});
            idx++;
            tl_wait_vals.push_back({});
            tl_wait_semaphores.push_back({});
            tl_signal_vals.push_back({});
            tl_signal_semaphores.push_back({});
            for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
                stage_flags[idx].push_back(ctx->q->stage_flags);
                tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
                tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
            }
            for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
                tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
                tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
            }
            tl_submit_infos.push_back({
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_vals[idx].data(),
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_vals[idx].data(),
            });
            tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
            tl_submit_infos[idx].pNext = nullptr;
            vk::SubmitInfo si{
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_semaphores[idx].data(),
                stage_flags[idx].data(),
                1,
                &submission.buffer,
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_semaphores[idx].data(),
            };
            si.setPNext(&tl_submit_infos[idx]);
            submit_infos.push_back(si);
        }
    }

    ctx->q->queue.submit(submit_infos, fence);

    ctx->seqs.clear();
}
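
// Select a queue family with the required flags, relaxing the constraints in
// stages: first honor the avoid flags and skip the compute family, then drop
// the avoid flags, then allow reusing the compute family, then ignore the
// minimum queue count, and finally fall back to the compute family itself.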
static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
    VK_LOG_DEBUG("ggml_vk_find_queue_family_index()");
    const uint32_t qfsize = queue_family_props.size();

    // Try with avoid preferences first
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
            return i;
        }
    }

    // Fall back to only required
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to reusing compute queue
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to ignoring min_num_queues
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations.
    // Thus, if the capabilities of a queue family include VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT, then reporting the VK_QUEUE_TRANSFER_BIT capability separately for that queue family is optional.
    if (compute_index >= 0) {
        return compute_index;
    }

    std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;

    for (auto &q_family : queue_family_props) {
        std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
    }
    abort();
}
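
// Initialize a vk_queue: create its command pool (transient command buffers
// are expected, hence the TRANSIENT flag) and fetch the queue handle for the
// given family and index.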
static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags) {
    VK_LOG_DEBUG("ggml_vk_create_queue()");
    std::lock_guard<std::mutex> guard(device->mutex);

    q.queue_family_index = queue_family_index;

    vk::CommandPoolCreateInfo command_pool_create_info_compute(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), queue_family_index);
    q.pool = device->device.createCommandPool(command_pool_create_info_compute);

    q.cmd_buffer_idx = 0;

    q.queue = device->device.getQueue(queue_family_index, queue_index);

    q.stage_flags = stage_flags;
}

static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_queue& q) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_context(" << result << ")");
    ctx->gc.contexts.emplace_back(result);
    result->q = &q;
    return result;
}

static vk_context ggml_vk_create_temporary_context(vk_queue& q) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_temporary_context(" << result << ")");
    result->q = &q;
    return result;
}
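
// Binary semaphores are created on demand and only tracked for destruction;
// the timeline semaphores and events below are instead recycled through an
// index into a pool that persists across graph evaluations.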
static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_binary_semaphore()");
    vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
    vk::SemaphoreCreateInfo ci{};
    ci.setPNext(&tci);
    vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
    ctx->gc.semaphores.push_back({ semaphore, 0 });
    return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
}

static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
    if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
        vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
        vk::SemaphoreCreateInfo ci{};
        ci.setPNext(&tci);
        vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
        ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
    }
    return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
}

static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
    if (ctx->event_idx >= ctx->gc.events.size()) {
        ctx->gc.events.push_back(ctx->device->device.createEvent({}));
    }
    return ctx->gc.events[ctx->event_idx++];
}
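
// Recycle all command buffers of a queue by resetting the pool in one call;
// the caller must ensure the submitted command buffers have completed.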
static void ggml_vk_queue_cleanup(vk_device& device, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_queue_cleanup()");
    std::lock_guard<std::mutex> guard(device->mutex);

    // Requires command buffers to be done
    device->device.resetCommandPool(q.pool);
    q.cmd_buffer_idx = 0;
}
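
// Find a memory type that is allowed by the buffer's memory requirements,
// carries all requested property flags and lives on a heap large enough for
// the allocation; returns UINT32_MAX when no type qualifies.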
static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            return static_cast<uint32_t>(i);
        }
    }
    return UINT32_MAX;
}
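
// Allocate and bind a storage/transfer buffer with the requested memory
// properties, retrying with fallback_flags when no memory type matches
// req_flags. Host-visible memory is mapped persistently; on failure the
// VkBuffer is destroyed before the exception propagates.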
static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags) << ", " << to_string(fallback_flags) << ")");
    if (size > device->max_memory_allocation_size) {
        throw vk::OutOfDeviceMemoryError("Requested buffer size exceeds device memory allocation limit");
    }

    std::lock_guard<std::mutex> guard(device->mutex);

    vk_buffer buf = std::make_shared<vk_buffer_struct>();

    if (size == 0) {
        buf->size = 0;
        return buf;
    }

    buf->size = size;
    vk::BufferCreateInfo buffer_create_info{
        vk::BufferCreateFlags(),
        size,
        vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
    };

    buf->buffer = device->device.createBuffer(buffer_create_info);

    vk::MemoryRequirements mem_req = device->device.getBufferMemoryRequirements(buf->buffer);

    vk::PhysicalDeviceMemoryProperties mem_props = device->physical_device.getMemoryProperties();

    uint32_t memory_type_index = find_properties(&mem_props, &mem_req, req_flags);
    buf->memory_property_flags = req_flags;

    if (memory_type_index == UINT32_MAX && fallback_flags) {
        memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
        buf->memory_property_flags = fallback_flags;
    }

    if (memory_type_index == UINT32_MAX) {
        device->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");
    }

    try {
        buf->device_memory = device->device.allocateMemory({ mem_req.size, memory_type_index });
    } catch (const vk::SystemError& e) {
        // Out of Host/Device memory, clean up buffer
        device->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw e;
    }
    buf->ptr = nullptr;

    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = device->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

    device->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);

    buf->device = device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    device->memory_logger->log_allocation(buf, size);
#endif

    return buf;
}

static vk_buffer ggml_vk_create_buffer_check(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(device, size, req_flags, fallback_flags);
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
}
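
// Device buffers prefer device-local memory; on unified memory architectures
// a host-visible, host-coherent type is accepted as a fallback.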
static vk_buffer ggml_vk_create_buffer_device(vk_device& device, size_t size) {
    vk_buffer buf;
    try {
        if (device->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer(device, size, vk::MemoryPropertyFlagBits::eDeviceLocal, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
        } else {
            buf = ggml_vk_create_buffer(device, size, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }

    return buf;
}

static void ggml_vk_destroy_buffer(vk_buffer& buf) {
    if (buf == nullptr) {
        return;
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    if (buf->device != nullptr) {
        buf->device->memory_logger->log_deallocation(buf);
    }
#endif

    buf.reset();
}

static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) {
    return { buf, 0, VK_WHOLE_SIZE };
}
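
// Record a coarse full barrier covering shader and transfer access in both
// directions; used to order dependent operations within a context.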
static void ggml_vk_sync_buffers(vk_context& ctx) {
    VK_LOG_DEBUG("ggml_vk_sync_buffers()");

    ctx->s->buffer.pipelineBarrier(
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        { {
          { vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite },
          { vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite }
        } },
        {},
        {}
    );
}
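
// Record a wait on previously signaled events at the queue's pipeline stages;
// no-op when the event list is empty.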
static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events) {
    VK_LOG_DEBUG("ggml_vk_wait_events()");
    if (events.empty()) {
        return;
    }

    ctx->s->buffer.waitEvents(
        events,
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        {},
        {}
    );
}
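
// Build the compute pipelines used by the backend. The warptile initializer
// lists below are tile-size and workgroup tuning parameters for the matmul
// shaders, scaled by the device's subgroup size; the l/m/s variants target
// large, medium and small matrices, with matching workgroup denominators
// (wg_denoms) and alignment requirements.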
static void ggml_vk_load_shaders(vk_device& device) {
    VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")");

    // mulmat
    std::initializer_list<uint32_t> warptile_l = { 128, 128, 128, 16, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_m = { 128,  64,  64, 16, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_s = { device->subgroup_size, 32, 32, 16, 32, 32, 2, 2, 2, device->subgroup_size };

    std::initializer_list<uint32_t> warptile_mmq_l = { 128, 128, 128, 32, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_mmq_m = { 128,  64,  64, 32, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_mmq_s = { device->subgroup_size, 32, 32, 32, 32, 32, 2, 2, 2, device->subgroup_size };

    std::array<uint32_t, 3> l_wg_denoms = { 128, 128, 1 };
    std::array<uint32_t, 3> m_wg_denoms = {  64,  64, 1 };
    std::array<uint32_t, 3> s_wg_denoms = {  32,  32, 1 };

    uint32_t l_align = 128;
    uint32_t m_align =  64;
    uint32_t s_align =  32;

    device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_id_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_id_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL] = std::make_shared<vk_matmul_pipeline_struct>();

    if (device->fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->l, "matmul_f32_l", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->m, "matmul_f32_m", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->s, "matmul_f32_s", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_l, "matmul_f32_aligned_l", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_m, "matmul_f32_aligned_m", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_s, "matmul_f32_aligned_s", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->l, "matmul_f32_f16_l", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->m, "matmul_f32_f16_m", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->s, "matmul_f32_f16_s", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_l, "matmul_f32_f16_aligned_l", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_m, "matmul_f32_f16_aligned_m", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_s, "matmul_f32_f16_aligned_s", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->l, "matmul_f16_l", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->m, "matmul_f16_m", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->s, "matmul_f16_s", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_l, "matmul_f16_aligned_l", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_m, "matmul_f16_aligned_m", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_s, "matmul_f16_aligned_s", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->l, "matmul_f16_f32_l", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->m, "matmul_f16_f32_m", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->s, "matmul_f16_f32_s", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->l, "matmul_q4_0_f32_l", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->m, "matmul_q4_0_f32_m", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->s, "matmul_q4_0_f32_s", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_l, "matmul_q4_0_f32_aligned_l", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_m, "matmul_q4_0_f32_aligned_m", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_s, "matmul_q4_0_f32_aligned_s", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->l, "matmul_q4_1_f32_l", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->m, "matmul_q4_1_f32_m", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->s, "matmul_q4_1_f32_s", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_l, "matmul_q4_1_f32_aligned_l", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_m, "matmul_q4_1_f32_aligned_m", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_s, "matmul_q4_1_f32_aligned_s", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->l, "matmul_q5_0_f32_l", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->m, "matmul_q5_0_f32_m", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->s, "matmul_q5_0_f32_s", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_l, "matmul_q5_0_f32_aligned_l", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_m, "matmul_q5_0_f32_aligned_m", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_s, "matmul_q5_0_f32_aligned_s", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->l, "matmul_q5_1_f32_l", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->m, "matmul_q5_1_f32_m", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->s, "matmul_q5_1_f32_s", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_l, "matmul_q5_1_f32_aligned_l", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_m, "matmul_q5_1_f32_aligned_m", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_s, "matmul_q5_1_f32_aligned_s", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->l, "matmul_q8_0_f32_l", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->m, "matmul_q8_0_f32_m", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->s, "matmul_q8_0_f32_s", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_l, "matmul_q8_0_f32_aligned_l", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_m, "matmul_q8_0_f32_aligned_m", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_s, "matmul_q8_0_f32_aligned_s", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->l, "matmul_q2_k_f32_l", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->m, "matmul_q2_k_f32_m", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->s, "matmul_q2_k_f32_s", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_l, "matmul_q2_k_f32_aligned_l", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_m, "matmul_q2_k_f32_aligned_m", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_s, "matmul_q2_k_f32_aligned_s", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->l, "matmul_q3_k_f32_l", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->m, "matmul_q3_k_f32_m", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->s, "matmul_q3_k_f32_s", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_l, "matmul_q3_k_f32_aligned_l", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_m, "matmul_q3_k_f32_aligned_m", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_s, "matmul_q3_k_f32_aligned_s", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->l, "matmul_q4_k_f32_l", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->m, "matmul_q4_k_f32_m", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->s, "matmul_q4_k_f32_s", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_l, "matmul_q4_k_f32_aligned_l", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_m, "matmul_q4_k_f32_aligned_m", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_s, "matmul_q4_k_f32_aligned_s", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->l, "matmul_q5_k_f32_l", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->m, "matmul_q5_k_f32_m", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->s, "matmul_q5_k_f32_s", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_l, "matmul_q5_k_f32_aligned_l", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_m, "matmul_q5_k_f32_aligned_m", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_s, "matmul_q5_k_f32_aligned_s", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->l, "matmul_q6_k_f32_l", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->m, "matmul_q6_k_f32_m", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->s, "matmul_q6_k_f32_s", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_l, "matmul_q6_k_f32_aligned_l", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_m, "matmul_q6_k_f32_aligned_m", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_s, "matmul_q6_k_f32_aligned_s", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->l, "matmul_iq4_nl_f32_l", matmul_iq4_nl_f32_len, matmul_iq4_nl_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->m, "matmul_iq4_nl_f32_m", matmul_iq4_nl_f32_len, matmul_iq4_nl_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->s, "matmul_iq4_nl_f32_s", matmul_iq4_nl_f32_len, matmul_iq4_nl_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_l, "matmul_iq4_nl_f32_aligned_l", matmul_iq4_nl_f32_aligned_len, matmul_iq4_nl_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_m, "matmul_iq4_nl_f32_aligned_m", matmul_iq4_nl_f32_aligned_len, matmul_iq4_nl_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_s, "matmul_iq4_nl_f32_aligned_s", matmul_iq4_nl_f32_aligned_len, matmul_iq4_nl_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->l, "matmul_id_f32_l", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->m, "matmul_id_f32_m", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->s, "matmul_id_f32_s", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_l, "matmul_id_f32_aligned_l", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_m, "matmul_id_f32_aligned_m", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_s, "matmul_id_f32_aligned_s", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->l, "matmul_id_f16_l", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->m, "matmul_id_f16_m", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->s, "matmul_id_f16_s", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_l, "matmul_id_f16_aligned_l", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_m, "matmul_id_f16_aligned_m", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_s, "matmul_id_f16_aligned_s", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->l, "matmul_id_f16_f32_l", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->m, "matmul_id_f16_f32_m", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->s, "matmul_id_f16_f32_s", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_l, "matmul_id_f16_f32_aligned_l", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_m, "matmul_id_f16_f32_aligned_m", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_s, "matmul_id_f16_f32_aligned_s", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->l, "matmul_id_q4_0_f32_l", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->m, "matmul_id_q4_0_f32_m", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->s, "matmul_id_q4_0_f32_s", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_l, "matmul_id_q4_0_f32_aligned_l", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_m, "matmul_id_q4_0_f32_aligned_m", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_s, "matmul_id_q4_0_f32_aligned_s", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->l, "matmul_id_q4_1_f32_l", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->m, "matmul_id_q4_1_f32_m", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->s, "matmul_id_q4_1_f32_s", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_l, "matmul_id_q4_1_f32_aligned_l", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_m, "matmul_id_q4_1_f32_aligned_m", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_s, "matmul_id_q4_1_f32_aligned_s", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->l, "matmul_id_q5_0_f32_l", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->m, "matmul_id_q5_0_f32_m", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->s, "matmul_id_q5_0_f32_s", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_l, "matmul_id_q5_0_f32_aligned_l", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_m, "matmul_id_q5_0_f32_aligned_m", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_s, "matmul_id_q5_0_f32_aligned_s", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->l, "matmul_id_q5_1_f32_l", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->m, "matmul_id_q5_1_f32_m", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->s, "matmul_id_q5_1_f32_s", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_l, "matmul_id_q5_1_f32_aligned_l", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_m, "matmul_id_q5_1_f32_aligned_m", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_s, "matmul_id_q5_1_f32_aligned_s", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->l, "matmul_id_q8_0_f32_l", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->m, "matmul_id_q8_0_f32_m", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->s, "matmul_id_q8_0_f32_s", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_l, "matmul_id_q8_0_f32_aligned_l", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_m, "matmul_id_q8_0_f32_aligned_m", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_s, "matmul_id_q8_0_f32_aligned_s", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->l, "matmul_id_q2_k_f32_l", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->m, "matmul_id_q2_k_f32_m", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->s, "matmul_id_q2_k_f32_s", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_l, "matmul_id_q2_k_f32_aligned_l", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_m, "matmul_id_q2_k_f32_aligned_m", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_s, "matmul_id_q2_k_f32_aligned_s", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->l, "matmul_id_q3_k_f32_l", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->m, "matmul_id_q3_k_f32_m", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->s, "matmul_id_q3_k_f32_s", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_l, "matmul_id_q3_k_f32_aligned_l", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_m, "matmul_id_q3_k_f32_aligned_m", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_s, "matmul_id_q3_k_f32_aligned_s", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->l, "matmul_id_q4_k_f32_l", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->m, "matmul_id_q4_k_f32_m", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->s, "matmul_id_q4_k_f32_s", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_l, "matmul_id_q4_k_f32_aligned_l", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_m, "matmul_id_q4_k_f32_aligned_m", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_s, "matmul_id_q4_k_f32_aligned_s", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->l, "matmul_id_q5_k_f32_l", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->m, "matmul_id_q5_k_f32_m", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->s, "matmul_id_q5_k_f32_s", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_l, "matmul_id_q5_k_f32_aligned_l", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_m, "matmul_id_q5_k_f32_aligned_m", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_s, "matmul_id_q5_k_f32_aligned_s", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->l, "matmul_id_q6_k_f32_l", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->m, "matmul_id_q6_k_f32_m", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->s, "matmul_id_q6_k_f32_s", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_l, "matmul_id_q6_k_f32_aligned_l", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_m, "matmul_id_q6_k_f32_aligned_m", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_s, "matmul_id_q6_k_f32_aligned_s", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);

        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->l, "matmul_id_iq4_nl_f32_l", matmul_id_iq4_nl_f32_len, matmul_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1116. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->m, "matmul_id_iq4_nl_f32_m", matmul_id_iq4_nl_f32_len, matmul_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1117. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->s, "matmul_id_iq4_nl_f32_s", matmul_id_iq4_nl_f32_len, matmul_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1118. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_l, "matmul_id_iq4_nl_f32_aligned_l", matmul_id_iq4_nl_f32_aligned_len, matmul_id_iq4_nl_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1119. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_m, "matmul_id_iq4_nl_f32_aligned_m", matmul_id_iq4_nl_f32_aligned_len, matmul_id_iq4_nl_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1120. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_s, "matmul_id_iq4_nl_f32_aligned_s", matmul_id_iq4_nl_f32_aligned_len, matmul_id_iq4_nl_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
    } else {
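        // Fallback branch: presumably taken when the device cannot use fp16 shader
        // arithmetic, so the pure-fp32 shader blobs (*_fp32_len / *_fp32_data) are
        // loaded instead. As in the branch above, each matmul gets six pipelines:
        // large/medium/small tile sizes (l/m/s) plus "aligned" variants
        // (a_l/a_m/a_s) that appear to assume the inner dimension is a multiple of
        // the corresponding alignment (l_align/m_align/s_align).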
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->l, "matmul_f32_l", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->m, "matmul_f32_m", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->s, "matmul_f32_s", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_l, "matmul_f32_aligned_l", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_m, "matmul_f32_aligned_m", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_s, "matmul_f32_aligned_s", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->l, "matmul_f32_f16_l", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->m, "matmul_f32_f16_m", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->s, "matmul_f32_f16_s", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_l, "matmul_f32_f16_aligned_l", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_m, "matmul_f32_f16_aligned_m", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_s, "matmul_f32_f16_aligned_s", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->l, "matmul_f16_l", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->m, "matmul_f16_m", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->s, "matmul_f16_s", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_l, "matmul_f16_aligned_l", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_m, "matmul_f16_aligned_m", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_s, "matmul_f16_aligned_s", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->l, "matmul_f16_f32_l", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->m, "matmul_f16_f32_m", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->s, "matmul_f16_f32_s", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
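        // Quantized matmul pipelines (fp32 shader variants): one l/m/s set plus
        // aligned variants per supported quantization type, using the mmq
        // warptile configurations (warptile_mmq_l/m/s).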
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->l, "matmul_q4_0_f32_l", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->m, "matmul_q4_0_f32_m", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->s, "matmul_q4_0_f32_s", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_l, "matmul_q4_0_f32_aligned_l", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_m, "matmul_q4_0_f32_aligned_m", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_s, "matmul_q4_0_f32_aligned_s", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->l, "matmul_q4_1_f32_l", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->m, "matmul_q4_1_f32_m", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->s, "matmul_q4_1_f32_s", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_l, "matmul_q4_1_f32_aligned_l", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_m, "matmul_q4_1_f32_aligned_m", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_s, "matmul_q4_1_f32_aligned_s", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->l, "matmul_q5_0_f32_l", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->m, "matmul_q5_0_f32_m", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->s, "matmul_q5_0_f32_s", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_l, "matmul_q5_0_f32_aligned_l", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_m, "matmul_q5_0_f32_aligned_m", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_s, "matmul_q5_0_f32_aligned_s", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->l, "matmul_q5_1_f32_l", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->m, "matmul_q5_1_f32_m", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->s, "matmul_q5_1_f32_s", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_l, "matmul_q5_1_f32_aligned_l", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_m, "matmul_q5_1_f32_aligned_m", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_s, "matmul_q5_1_f32_aligned_s", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->l, "matmul_q8_0_f32_l", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->m, "matmul_q8_0_f32_m", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->s, "matmul_q8_0_f32_s", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_l, "matmul_q8_0_f32_aligned_l", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_m, "matmul_q8_0_f32_aligned_m", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_s, "matmul_q8_0_f32_aligned_s", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->l, "matmul_q2_k_f32_l", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->m, "matmul_q2_k_f32_m", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->s, "matmul_q2_k_f32_s", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_l, "matmul_q2_k_f32_aligned_l", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_m, "matmul_q2_k_f32_aligned_m", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_s, "matmul_q2_k_f32_aligned_s", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->l, "matmul_q3_k_f32_l", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->m, "matmul_q3_k_f32_m", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->s, "matmul_q3_k_f32_s", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_l, "matmul_q3_k_f32_aligned_l", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_m, "matmul_q3_k_f32_aligned_m", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_s, "matmul_q3_k_f32_aligned_s", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->l, "matmul_q4_k_f32_l", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->m, "matmul_q4_k_f32_m", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->s, "matmul_q4_k_f32_s", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_l, "matmul_q4_k_f32_aligned_l", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_m, "matmul_q4_k_f32_aligned_m", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_s, "matmul_q4_k_f32_aligned_s", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->l, "matmul_q5_k_f32_l", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->m, "matmul_q5_k_f32_m", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->s, "matmul_q5_k_f32_s", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_l, "matmul_q5_k_f32_aligned_l", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_m, "matmul_q5_k_f32_aligned_m", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_s, "matmul_q5_k_f32_aligned_s", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->l, "matmul_q6_k_f32_l", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->m, "matmul_q6_k_f32_m", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->s, "matmul_q6_k_f32_s", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_l, "matmul_q6_k_f32_aligned_l", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_m, "matmul_q6_k_f32_aligned_m", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_s, "matmul_q6_k_f32_aligned_s", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->l, "matmul_iq4_nl_f32_l", matmul_iq4_nl_f32_fp32_len, matmul_iq4_nl_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->m, "matmul_iq4_nl_f32_m", matmul_iq4_nl_f32_fp32_len, matmul_iq4_nl_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->s, "matmul_iq4_nl_f32_s", matmul_iq4_nl_f32_fp32_len, matmul_iq4_nl_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_l, "matmul_iq4_nl_f32_aligned_l", matmul_iq4_nl_f32_aligned_fp32_len, matmul_iq4_nl_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_m, "matmul_iq4_nl_f32_aligned_m", matmul_iq4_nl_f32_aligned_fp32_len, matmul_iq4_nl_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_s, "matmul_iq4_nl_f32_aligned_s", matmul_iq4_nl_f32_aligned_fp32_len, matmul_iq4_nl_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
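        // matmul_id pipelines: variants of the above used for GGML_OP_MUL_MAT_ID
        // (indirect matmul with per-row expert selection, as in MoE models). They
        // bind one extra descriptor (4 instead of 3), presumably for the ids
        // tensor, and use vk_mat_mat_id_push_constants.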
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->l, "matmul_id_f32_l", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->m, "matmul_id_f32_m", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->s, "matmul_id_f32_s", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_l, "matmul_id_f32_aligned_l", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_m, "matmul_id_f32_aligned_m", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_s, "matmul_id_f32_aligned_s", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->l, "matmul_id_f16_l", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->m, "matmul_id_f16_m", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->s, "matmul_id_f16_s", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_l, "matmul_id_f16_aligned_l", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_m, "matmul_id_f16_aligned_m", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_s, "matmul_id_f16_aligned_s", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->l, "matmul_id_f16_f32_l", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->m, "matmul_id_f16_f32_m", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->s, "matmul_id_f16_f32_s", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_l, "matmul_id_f16_f32_aligned_l", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_m, "matmul_id_f16_f32_aligned_m", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_s, "matmul_id_f16_f32_aligned_s", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->l, "matmul_id_q4_0_f32_l", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->m, "matmul_id_q4_0_f32_m", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->s, "matmul_id_q4_0_f32_s", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_l, "matmul_id_q4_0_f32_aligned_l", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_m, "matmul_id_q4_0_f32_aligned_m", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_s, "matmul_id_q4_0_f32_aligned_s", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->l, "matmul_id_q4_1_f32_l", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->m, "matmul_id_q4_1_f32_m", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->s, "matmul_id_q4_1_f32_s", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_l, "matmul_id_q4_1_f32_aligned_l", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_m, "matmul_id_q4_1_f32_aligned_m", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_s, "matmul_id_q4_1_f32_aligned_s", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->l, "matmul_id_q5_0_f32_l", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->m, "matmul_id_q5_0_f32_m", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->s, "matmul_id_q5_0_f32_s", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_l, "matmul_id_q5_0_f32_aligned_l", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_m, "matmul_id_q5_0_f32_aligned_m", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_s, "matmul_id_q5_0_f32_aligned_s", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->l, "matmul_id_q5_1_f32_l", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->m, "matmul_id_q5_1_f32_m", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->s, "matmul_id_q5_1_f32_s", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_l, "matmul_id_q5_1_f32_aligned_l", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_m, "matmul_id_q5_1_f32_aligned_m", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_s, "matmul_id_q5_1_f32_aligned_s", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->l, "matmul_id_q8_0_f32_l", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->m, "matmul_id_q8_0_f32_m", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->s, "matmul_id_q8_0_f32_s", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_l, "matmul_id_q8_0_f32_aligned_l", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_m, "matmul_id_q8_0_f32_aligned_m", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_s, "matmul_id_q8_0_f32_aligned_s", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->l, "matmul_id_q2_k_f32_l", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->m, "matmul_id_q2_k_f32_m", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->s, "matmul_id_q2_k_f32_s", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_l, "matmul_id_q2_k_f32_aligned_l", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_m, "matmul_id_q2_k_f32_aligned_m", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_s, "matmul_id_q2_k_f32_aligned_s", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->l, "matmul_id_q3_k_f32_l", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->m, "matmul_id_q3_k_f32_m", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->s, "matmul_id_q3_k_f32_s", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_l, "matmul_id_q3_k_f32_aligned_l", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_m, "matmul_id_q3_k_f32_aligned_m", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_s, "matmul_id_q3_k_f32_aligned_s", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->l, "matmul_id_q4_k_f32_l", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->m, "matmul_id_q4_k_f32_m", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->s, "matmul_id_q4_k_f32_s", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_l, "matmul_id_q4_k_f32_aligned_l", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_m, "matmul_id_q4_k_f32_aligned_m", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_s, "matmul_id_q4_k_f32_aligned_s", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->l, "matmul_id_q5_k_f32_l", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->m, "matmul_id_q5_k_f32_m", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->s, "matmul_id_q5_k_f32_s", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_l, "matmul_id_q5_k_f32_aligned_l", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_m, "matmul_id_q5_k_f32_aligned_m", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_s, "matmul_id_q5_k_f32_aligned_s", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->l, "matmul_id_q6_k_f32_l", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->m, "matmul_id_q6_k_f32_m", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->s, "matmul_id_q6_k_f32_s", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_l, "matmul_id_q6_k_f32_aligned_l", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_m, "matmul_id_q6_k_f32_aligned_m", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_s, "matmul_id_q6_k_f32_aligned_s", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->l, "matmul_id_iq4_nl_f32_l", matmul_id_iq4_nl_f32_fp32_len, matmul_id_iq4_nl_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->m, "matmul_id_iq4_nl_f32_m", matmul_id_iq4_nl_f32_fp32_len, matmul_id_iq4_nl_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->s, "matmul_id_iq4_nl_f32_s", matmul_id_iq4_nl_f32_fp32_len, matmul_id_iq4_nl_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_l, "matmul_id_iq4_nl_f32_aligned_l", matmul_id_iq4_nl_f32_aligned_fp32_len, matmul_id_iq4_nl_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_m, "matmul_id_iq4_nl_f32_aligned_m", matmul_id_iq4_nl_f32_aligned_fp32_len, matmul_id_iq4_nl_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_s, "matmul_id_iq4_nl_f32_aligned_s", matmul_id_iq4_nl_f32_aligned_fp32_len, matmul_id_iq4_nl_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
    }
    // mul mat vec
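    // Matrix-vector pipelines: note the {1, 1, 1} workgroup denominators and the
    // device's subgroup size passed as the sole specialization constant, which
    // presumably sizes each shader workgroup to match the hardware subgroup.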
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1330. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1331. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1332. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1333. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1334. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1335. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1336. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1337. // dequant shaders
  1338. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1339. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1340. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1341. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1342. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1343. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1344. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_k", dequant_q2_k_len, dequant_q2_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
  1345. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_k", dequant_q3_k_len, dequant_q3_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
  1346. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
  1347. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
  1348. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
  1349. ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  1350. // get_rows
  1351. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  1352. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  1353. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1354. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1355. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1356. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1357. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1358. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1359. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  1360. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  1361. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1362. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1363. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1364. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1365. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1366. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  1367. ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256, 1, 1}, {}, 1);
  1368. ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
  1369. ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
  1370. ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
  1371. ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
  1372. ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
  1373. ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1374. ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1375. ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1376. ggml_vk_create_pipeline(device, device->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1377. ggml_vk_create_pipeline(device, device->pipeline_add_f16_f32_f16, "add_f16_f32_f16", add_f16_f32_f16_len, add_f16_f32_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1378. ggml_vk_create_pipeline(device, device->pipeline_mul_f32, "mul_f32", mul_f32_len, mul_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1379. ggml_vk_create_pipeline(device, device->pipeline_div_f32, "div_f32", div_f32_len, div_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1380. ggml_vk_create_pipeline(device, device->pipeline_concat_f32, "concat_f32", concat_f32_len, concat_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1381. ggml_vk_create_pipeline(device, device->pipeline_concat_f16, "concat_f16", concat_f16_len, concat_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1382. ggml_vk_create_pipeline(device, device->pipeline_concat_i32, "concat_i32", concat_i32_len, concat_i32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
  1383. ggml_vk_create_pipeline(device, device->pipeline_upscale_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {}, 1);
  1384. ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1385. ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1386. ggml_vk_create_pipeline(device, device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1387. ggml_vk_create_pipeline(device, device->pipeline_pad_f32, "pad_f32", pad_f32_len, pad_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
  1388. ggml_vk_create_pipeline(device, device->pipeline_gelu_f32, "gelu_f32", gelu_f32_len, gelu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  1389. ggml_vk_create_pipeline(device, device->pipeline_gelu_quick_f32, "gelu_quick_f32", gelu_quick_f32_len, gelu_quick_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  1390. ggml_vk_create_pipeline(device, device->pipeline_silu_f32, "silu_f32", silu_f32_len, silu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  1391. ggml_vk_create_pipeline(device, device->pipeline_relu_f32, "relu_f32", relu_f32_len, relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  1392. ggml_vk_create_pipeline(device, device->pipeline_leaky_relu_f32, "leaky_relu_f32", leaky_relu_f32_len, leaky_relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  1393. ggml_vk_create_pipeline(device, device->pipeline_tanh_f32, "tanh_f32", tanh_f32_len, tanh_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  1394. ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1);
  1395. ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
  1396. ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
  1397. ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
  1398. ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
  1399. ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
  1400. ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
  1401. ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
  1402. ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1403. ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
  1404. ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
  1405. ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1);
  1406. }
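
// Lazily creates the logical device for the given index and caches it in
// vk_instance.devices; later calls return the cached vk_device. Setup covers
// extension and feature queries, queue family selection, logical device
// creation and compute shader loading.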
static vk_device ggml_vk_get_device(size_t idx) {
    VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");

    if (vk_instance.devices[idx] == nullptr) {
        VK_LOG_DEBUG("Initializing new vk_device");

        vk_device device = std::make_shared<vk_device_struct>();
        vk_instance.devices[idx] = device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
        device->memory_logger = std::unique_ptr<vk_memory_logger>(new vk_memory_logger());
#endif

        size_t dev_num = vk_instance.device_indices[idx];

        std::vector<vk::PhysicalDevice> physical_devices = vk_instance.instance.enumeratePhysicalDevices();

        if (dev_num >= physical_devices.size()) {
            std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
            throw std::runtime_error("Device not found");
        }

        device->physical_device = physical_devices[dev_num];
        const std::vector<vk::ExtensionProperties> ext_props = device->physical_device.enumerateDeviceExtensionProperties();

        bool maintenance4_support = false;

        // Check if maintenance4 is supported
        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
                maintenance4_support = true;
            }
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceMaintenance3Properties props3;
        vk::PhysicalDeviceMaintenance4Properties props4;
        vk::PhysicalDeviceSubgroupProperties subgroup_props;
        props2.pNext = &props3;
        props3.pNext = &subgroup_props;
        if (maintenance4_support) {
            subgroup_props.pNext = &props4;
        }
        device->physical_device.getProperties2(&props2);
        device->properties = props2.properties;

        const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");

        if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) {
            device->max_memory_allocation_size = std::stoi(GGML_VK_FORCE_MAX_ALLOCATION_SIZE);
        } else if (maintenance4_support) {
            device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
        } else {
            device->max_memory_allocation_size = props3.maxMemoryAllocationSize;
        }

        device->vendor_id = device->properties.vendorID;
        device->subgroup_size = subgroup_props.subgroupSize;
        device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

        bool fp16_storage = false;
        bool fp16_compute = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
                fp16_storage = true;
            } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
                fp16_compute = true;
            }
        }

        const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
        const bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

        device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

        std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();

        // Try to find a non-graphics compute queue and transfer-focused queues
        const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
        const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);

        const float priorities[] = { 1.0f, 1.0f };
        device->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;

        std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
        if (compute_queue_family_index != transfer_queue_family_index) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
        } else if (!device->single_queue) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
        } else {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
        }
        vk::DeviceCreateInfo device_create_info;
        std::vector<const char *> device_extensions;
        vk::PhysicalDeviceFeatures device_features = device->physical_device.getFeatures();

        VkPhysicalDeviceFeatures2 device_features2;
        device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        device_features2.pNext = nullptr;
        device_features2.features = (VkPhysicalDeviceFeatures)device_features;

        VkPhysicalDeviceVulkan11Features vk11_features;
        vk11_features.pNext = nullptr;
        vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
        device_features2.pNext = &vk11_features;

        VkPhysicalDeviceVulkan12Features vk12_features;
        vk12_features.pNext = nullptr;
        vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
        vk11_features.pNext = &vk12_features;
        vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);

        device->fp16 = device->fp16 && vk12_features.shaderFloat16;

        if (!vk11_features.storageBuffer16BitAccess) {
            std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
            throw std::runtime_error("Unsupported device");
        }

        device_extensions.push_back("VK_KHR_16bit_storage");

#ifdef GGML_VULKAN_VALIDATE
        device_extensions.push_back("VK_KHR_shader_non_semantic_info");
#endif

        if (device->fp16) {
            device_extensions.push_back("VK_KHR_shader_float16_int8");
        }
        device->name = device->properties.deviceName.data();

        device_create_info = {
            vk::DeviceCreateFlags(),
            device_queue_create_infos,
            {},
            device_extensions
        };
        device_create_info.setPNext(&device_features2);
        device->device = device->physical_device.createDevice(device_create_info);

        device->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN;

        // Queues
        ggml_vk_create_queue(device, device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer });

        // Shaders
        ggml_vk_load_shaders(device);

        if (!device->single_queue) {
            const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
            ggml_vk_create_queue(device, device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer });
        } else {
            // TODO: Use pointer or reference to avoid copy
            device->transfer_queue = device->compute_queue;
        }

        device->buffer_type = {
            /* .iface   = */ ggml_backend_vk_buffer_type_interface,
            /* .context = */ new ggml_backend_vk_buffer_type_context{ device->name, device },
        };

        device->fence = device->device.createFence({});

        device->idx = idx;

        return device;
    }

    return vk_instance.devices[idx];
}
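
// Prints a one-line summary (device name, driver, UMA, fp16 support, subgroup
// size) for the physical device at the given index, repeating the same
// extension and feature checks that device creation performs.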
static void ggml_vk_print_gpu_info(size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
    VK_LOG_DEBUG("ggml_vk_print_gpu_info(" << dev_num << ")");
    GGML_ASSERT(vk_instance_initialized);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    if (dev_num >= devices.size()) {
        std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
        throw std::runtime_error("Device not found");
    }

    vk::PhysicalDevice physical_device = devices[dev_num];
    std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();

    vk::PhysicalDeviceProperties2 props2;
    vk::PhysicalDeviceMaintenance3Properties props3;
    vk::PhysicalDeviceSubgroupProperties subgroup_props;
    vk::PhysicalDeviceDriverProperties driver_props;
    props2.pNext = &props3;
    props3.pNext = &subgroup_props;
    subgroup_props.pNext = &driver_props;
    physical_device.getProperties2(&props2);

    const size_t subgroup_size = subgroup_props.subgroupSize;
    const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

    bool fp16_storage = false;
    bool fp16_compute = false;

    for (auto properties : ext_props) {
        if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
            fp16_storage = true;
        } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
            fp16_compute = true;
        }
    }

    const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
    bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

    bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

    vk::PhysicalDeviceFeatures device_features = physical_device.getFeatures();

    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    device_features2.pNext = nullptr;
    device_features2.features = (VkPhysicalDeviceFeatures)device_features;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    VkPhysicalDeviceVulkan12Features vk12_features;
    vk12_features.pNext = nullptr;
    vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
    vk11_features.pNext = &vk12_features;
    vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);

    fp16 = fp16 && vk12_features.shaderFloat16;

    std::string device_name = props2.properties.deviceName.data();
    std::cerr << GGML_VK_NAME << idx << ": " << device_name << " (" << driver_props.driverName << ") | uma: " << uma << " | fp16: " << fp16 << " | warp size: " << subgroup_size << std::endl;

    if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
        std::cerr << "ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want." << std::endl;
    }
}

static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
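
// One-time instance setup: enables validation and (on Apple) portability
// enumeration extensions when available, then builds the list of visible
// devices either from GGML_VK_VISIBLE_DEVICES or by selecting all discrete
// GPUs, deduplicated by deviceUUID with a per-vendor driver priority order.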
void ggml_vk_instance_init() {
    if (vk_instance_initialized) {
        return;
    }
    VK_LOG_DEBUG("ggml_vk_instance_init()");

    vk_instance_initialized = true;

    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION };

    const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
    const bool validation_ext = ggml_vk_instance_validation_ext_available(instance_extensions);
#ifdef __APPLE__
    const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
#endif

    std::vector<const char*> layers;

    if (validation_ext) {
        layers.push_back("VK_LAYER_KHRONOS_validation");
    }
    std::vector<const char*> extensions;
    if (validation_ext) {
        extensions.push_back("VK_EXT_validation_features");
    }
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        extensions.push_back("VK_KHR_portability_enumeration");
    }
#endif
    vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
    }
#endif

    std::vector<vk::ValidationFeatureEnableEXT> features_enable;
    vk::ValidationFeaturesEXT validation_features;

    if (validation_ext) {
        features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
        validation_features = {
            features_enable,
            {},
        };
        validation_features.setPNext(nullptr);
        instance_create_info.setPNext(&validation_features);

        std::cerr << "ggml_vulkan: Validation layers enabled" << std::endl;
    }
    vk_instance.instance = vk::createInstance(instance_create_info);

    size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size();

    // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
    char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
    if (devices_env != nullptr) {
        std::string devices(devices_env);
        std::replace(devices.begin(), devices.end(), ',', ' ');

        std::stringstream ss(devices);
        size_t tmp;
        while (ss >> tmp) {
            if (tmp >= num_available_devices) {
                std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
                throw std::runtime_error("Invalid Vulkan device index");
            }
            vk_instance.device_indices.push_back(tmp);
        }
    } else {
        std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

        // Make sure at least one device exists
        if (devices.empty()) {
            std::cerr << "ggml_vulkan: Error: No devices found." << std::endl;
            GGML_ABORT("fatal error");
        }

        // Default to using all dedicated GPUs
        for (size_t i = 0; i < devices.size(); i++) {
            vk::PhysicalDeviceProperties2 new_props;
            vk::PhysicalDeviceDriverProperties new_driver;
            vk::PhysicalDeviceIDProperties new_id;
            new_props.pNext = &new_driver;
            new_driver.pNext = &new_id;
            devices[i].getProperties2(&new_props);

            if (new_props.properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu) {
                // Check if there are two physical devices corresponding to the same GPU
                auto old_device = std::find_if(
                    vk_instance.device_indices.begin(),
                    vk_instance.device_indices.end(),
                    [&devices, &new_id](const size_t k){
                        vk::PhysicalDeviceProperties2 old_props;
                        vk::PhysicalDeviceIDProperties old_id;
                        old_props.pNext = &old_id;
                        devices[k].getProperties2(&old_props);
                        return std::equal(std::begin(old_id.deviceUUID), std::end(old_id.deviceUUID), std::begin(new_id.deviceUUID));
                    }
                );
                if (old_device == vk_instance.device_indices.end()) {
                    vk_instance.device_indices.push_back(i);
                } else {
                    // There can be two physical devices corresponding to the same GPU if there are two different drivers.
                    // This can cause errors when splitting layers across the devices, so only one of them is kept.
  1688. VK_LOG_DEBUG("Device " << i << " and device " << *old_device << " have the same deviceUUID");
  1689. vk::PhysicalDeviceProperties2 old_props;
  1690. vk::PhysicalDeviceDriverProperties old_driver;
  1691. old_props.pNext = &old_driver;
  1692. devices[*old_device].getProperties2(&old_props);
  1693. std::map<vk::DriverId, int> driver_priorities {};
  1694. int old_priority = std::numeric_limits<int>::max();
  1695. int new_priority = std::numeric_limits<int>::max();
  1696. // Check https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDriverId.html for the list of driver id
  1697. // Smaller number -> higher priority
  1698. switch (old_props.properties.vendorID) {
  1699. case VK_VENDOR_ID_AMD:
  1700. driver_priorities[vk::DriverId::eMesaRadv] = 1;
  1701. driver_priorities[vk::DriverId::eAmdOpenSource] = 2;
  1702. driver_priorities[vk::DriverId::eAmdProprietary] = 3;
  1703. break;
  1704. case VK_VENDOR_ID_INTEL:
  1705. driver_priorities[vk::DriverId::eIntelOpenSourceMESA] = 1;
  1706. driver_priorities[vk::DriverId::eIntelProprietaryWindows] = 2;
  1707. break;
  1708. case VK_VENDOR_ID_NVIDIA:
  1709. driver_priorities[vk::DriverId::eNvidiaProprietary] = 1;
  1710. #if defined(VK_API_VERSION_1_3) && VK_HEADER_VERSION >= 235
  1711. driver_priorities[vk::DriverId::eMesaNvk] = 2;
  1712. #endif
  1713. break;
  1714. }
  1715. if (driver_priorities.count(old_driver.driverID)) {
  1716. old_priority = driver_priorities[old_driver.driverID];
  1717. }
  1718. if (driver_priorities.count(new_driver.driverID)) {
  1719. new_priority = driver_priorities[new_driver.driverID];
  1720. }
  1721. if (new_priority < old_priority) {
  1722. auto r = std::remove(vk_instance.device_indices.begin(), vk_instance.device_indices.end(), *old_device);
  1723. vk_instance.device_indices.erase(r, vk_instance.device_indices.end());
  1724. vk_instance.device_indices.push_back(i);
  1725. VK_LOG_DEBUG("Prioritize device " << i << " driver " << new_driver.driverName << " over device " << *old_device << " driver " << old_driver.driverName);
  1726. }
  1727. else {
  1728. VK_LOG_DEBUG("Prioritize device " << *old_device << " driver " << old_driver.driverName << " over device " << i << " driver " << new_driver.driverName << std::endl);
  1729. }
  1730. }
  1731. }
  1732. }
  1733. // If no dedicated GPUs found, fall back to GPU 0
  1734. if (vk_instance.device_indices.empty()) {
  1735. vk_instance.device_indices.push_back(0);
  1736. }
  1737. }
  1738. std::cerr << "ggml_vulkan: Found " << vk_instance.device_indices.size() << " Vulkan devices:" << std::endl;
  1739. for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
  1740. ggml_vk_print_gpu_info(i);
  1741. }
  1742. }
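
// Per-backend-context initialization: binds the context to its device and
// resets the semaphore, preallocation and staging bookkeeping.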
static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
    VK_LOG_DEBUG("ggml_vk_init(" << ctx->name << ", " << idx << ")");
    ggml_vk_instance_init();
    GGML_ASSERT(idx < vk_instance.device_indices.size());

    ctx->name = GGML_VK_NAME + std::to_string(idx);

    ctx->device = ggml_vk_get_device(idx);

    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;

    ctx->fence = ctx->device->device.createFence({});

    ctx->staging_size = 0;
    ctx->staging_offset = 0;

#ifdef GGML_VULKAN_CHECK_RESULTS
    const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
    vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
    const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
    vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
#endif
}
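
// Pipeline lookup helpers follow. Each returns nullptr for type combinations
// that have no compiled shader, which callers can use as a support check.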
static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
    VK_LOG_DEBUG("ggml_vk_get_to_fp16()");
    switch (type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant[type];
}
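
// Matrix-matrix pipeline selection: f32/f16 input combinations map to the
// dedicated matmul pipelines; quantized src0 requires f32 src1 and maps to
// the dequantizing matmul pipelines.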
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline(" << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f32;
    }
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f32_f16;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f16_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f16;
    }

    if (src1_type != GGML_TYPE_F32) {
        return nullptr;
    }

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_mat[src0_type];
}
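
// Matrix-vector pipeline selection: the b operand may be f32 or f16, and each
// choice has its own family of dequantizing mul_mat_vec pipelines.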
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()");
    GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type];
}
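
// The _id variants below select pipelines for mul_mat_id, the matmul that
// takes an extra row-id tensor (hence the fourth descriptor binding in their
// pipeline creation above); src1 must be f32 for these.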
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f16_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_id_f16;
    }

    GGML_ASSERT(src1_type == GGML_TYPE_F32);

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type];
}
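
// mul_mat_vec counterpart of the _id pipelines; only f32 b tensors are
// supported here.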
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec_id()");
    GGML_ASSERT(b_type == GGML_TYPE_F32);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_vec_id_f32[a_type];
}
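
// Device buffer pool: reuse the smallest pooled buffer that fits the request;
// if none fits, destroy the largest pooled buffer and allocate a new one.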
static vk_buffer ggml_vk_pool_malloc(ggml_backend_vk_context * ctx, size_t size) {
    VK_LOG_DEBUG("ggml_vk_pool_malloc(" << size << ")");
    VK_LOG_MEMORY("ggml_vk_pool_malloc");

    int best_i = -1;
    size_t best_size = std::numeric_limits<size_t>::max(); // smallest unused buffer that fits our needs
    int worst_i = -1;
    size_t worst_size = 0; // largest unused buffer seen so far
    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
        vk_buffer &b = ctx->buffer_pool[i];
        if (b != nullptr && b->size >= size && b->size < best_size) {
            best_i = i;
            best_size = b->size;
        }
        if (b != nullptr && b->size > worst_size) {
            worst_i = i;
            worst_size = b->size;
        }
    }
    if (best_i != -1) {
        // found the smallest buffer that fits our needs
        vk_buffer b = ctx->buffer_pool[best_i];
        ctx->buffer_pool[best_i].reset();
        return b;
    }
    if (worst_i != -1) {
        // no pooled buffer fits; destroy the largest one to free memory before allocating anew
        vk_buffer& b = ctx->buffer_pool[worst_i];
        ggml_vk_destroy_buffer(b);
    }

    return ggml_vk_create_buffer_device(ctx->device, size);
}
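
// Return a buffer to the first empty pool slot; if the pool is full, the
// buffer is destroyed instead.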
static void ggml_vk_pool_free(ggml_backend_vk_context * ctx, vk_buffer& buffer) {
    VK_LOG_DEBUG("ggml_vk_pool_free(" << buffer->size << ")");
    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
        vk_buffer& b = ctx->buffer_pool[i];
        if (b == nullptr) {
            b = buffer;
            return;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: vk buffer pool full, increase MAX_VK_BUFFERS" << std::endl;
    ggml_vk_destroy_buffer(buffer);
}
// Returns an available temporary buffer; it may only be used temporarily, as it will be reused
static vk_buffer ggml_vk_create_buffer_temp(ggml_backend_vk_context * ctx, size_t size) {
    // Try to find existing temp buffer with enough capacity
    for (auto& buffer : ctx->gc.temp_buffers) {
        if (buffer->size >= size) {
            return buffer;
        }
    }

    VK_LOG_MEMORY("ggml_vk_create_buffer_temp(" << size << ")");

    // Otherwise create new buffer
    vk_buffer buf = ggml_vk_pool_malloc(ctx, size);
    ctx->gc.temp_buffers.push_back(buf);

    return buf;
}
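
// Pinned host memory helpers: allocations are backed by host-visible Vulkan
// buffers and tracked in device->pinned_memory, so transfers can later detect
// them and use them directly as staging memory.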
static void * ggml_vk_host_malloc(vk_device& device, size_t size) {
    VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")");
    vk_buffer buf = ggml_vk_create_buffer(device, size,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);

    if (!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
            size/1024.0/1024.0);
        device->device.freeMemory(buf->device_memory);
        device->device.destroyBuffer(buf->buffer);
        return nullptr;
    }

    device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));

    return buf->ptr;
}

static void ggml_vk_host_free(vk_device& device, void* ptr) {
    if (ptr == nullptr) {
        return;
    }
    VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
    vk_buffer buf;
    size_t index;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            index = i;
            break;
        }
    }
    if (buf == nullptr) {
        fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
        return;
    }

    ggml_vk_destroy_buffer(buf);

    device->pinned_memory.erase(device->pinned_memory.begin() + index);
}
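
// Checks whether ptr lies inside a tracked pinned allocation; on a hit,
// returns the backing buffer and the offset of ptr within it.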
static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
    buf = nullptr;
    buf_offset = 0;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            buf_offset = ((const uint8_t *)ptr) - addr;
            break;
        }
    }
}
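
// Command recording helpers: a vk_submission bundles a command buffer with
// the semaphores it waits on and signals.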
static vk_submission ggml_vk_begin_submission(vk_device& device, vk_queue& q, bool one_time = true) {
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(device, q);
    if (one_time) {
        s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
    } else {
        s.buffer.begin({ vk::CommandBufferUsageFlags{} });
    }

    return s;
}

static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& subctx, vk_pipeline& pipeline, std::initializer_list<vk::DescriptorBufferInfo> const& descriptor_buffer_infos, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
    const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
    const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
    const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
    VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
    for (auto& buffer : descriptor_buffer_infos) {
        std::cerr << "(" << buffer << ", " << buffer.offset << ", " << buffer.size << "), ";
    }
    std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
    GGML_ASSERT(pipeline->descriptor_set_idx < pipeline->descriptor_sets.size());
    GGML_ASSERT(descriptor_buffer_infos.size() == pipeline->parameter_count);

    vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++];
    vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() };
    ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {});

    subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
    subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
    subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
                                pipeline->layout,
                                0,
                                { descriptor_set },
                                {});
    subctx->s->buffer.dispatch(wg0, wg1, wg2);
}

static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    s.buffer.end();

    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
}

static void ggml_vk_ctx_end(vk_context& ctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")");
    if (ctx->s == nullptr) {
        return;
    }

    ctx->s->buffer.end();
    ctx->s = nullptr;
}

static void ggml_vk_ctx_begin(vk_device& device, vk_context& subctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_begin(" << device->name << ")");
    if (subctx->s != nullptr) {
        ggml_vk_ctx_end(subctx);
    }

    subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->q) });
    subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
}

static size_t ggml_vk_align_size(size_t width, size_t align) {
    VK_LOG_DEBUG("ggml_vk_align_size(" << width << ", " << align << ")");
    return CEIL_DIV(width, align) * align;
}
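
// Copies immediately, or records the copy into `memcpys` so it can be
// performed later, shortly before the commands that consume the data are
// submitted.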
static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
    if (memcpys == nullptr) {
        memcpy(dst, src, size);
    } else {
        memcpys->emplace_back(dst, src, size);
    }
}

static void ggml_vk_ensure_sync_staging_buffer(vk_device& device, size_t size) {
    if (device->sync_staging == nullptr || device->sync_staging->size < size) {
        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
        ggml_vk_destroy_buffer(device->sync_staging);
        device->sync_staging = ggml_vk_create_buffer_check(device, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}
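
// Asynchronous write of a non-contiguous tensor into a device buffer. Pinned
// sources are copied directly with per-slice vkCmdCopyBuffer regions;
// everything else goes through a host-visible staging buffer with deferred
// memcpys.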
static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context& subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
    GGML_ASSERT(!ggml_is_contiguous(tensor));
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf;
    size_t buf_offset;
    ggml_vk_host_get(ctx->device, tensor->data, buf, buf_offset);

    const uint64_t ne0 = tensor->ne[0];
    const uint64_t ne1 = tensor->ne[1];
    const uint64_t ne2 = tensor->ne[2];
    const uint64_t ne3 = tensor->ne[3];
    const uint64_t nb0 = tensor->nb[0];
    const uint64_t nb1 = tensor->nb[1];
    const uint64_t nb2 = tensor->nb[2];
    const uint64_t nb3 = tensor->nb[3];
    const ggml_type type = tensor->type;
    const uint64_t ts = ggml_type_size(type);
    const uint64_t bs = ggml_blck_size(type);

    const uint64_t dstnb0 = ts;
    const uint64_t dstnb1 = dstnb0*(ne0/bs);
    const uint64_t dstnb2 = dstnb1*ne1;
    const uint64_t dstnb3 = dstnb2*ne2;

    const uint64_t ne = ggml_nelements(tensor);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices;

        for (uint64_t i3 = 0; i3 < ne3; i3++) {
            for (uint64_t i2 = 0; i2 < ne2; i2++) {
                // Find longest contiguous slice
                if (ne1*nb1 == dstnb2) {
                    slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
                } else {
                    for (uint64_t i1 = 0; i1 < ne1; i1++) {
                        if (ne0*nb0/bs == dstnb1) {
                            slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
                        } else {
                            const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                            const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                            for (uint64_t i0 = 0; i0 < ne0; i0++) {
                                slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 });
                            }
                        }
                    }
                }
            }
        }

        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    // Staging buffer required
    vk_buffer staging = ctx->staging;
    size_t staging_offset = ctx->staging_offset;
    const size_t copy_size = ts*ne/bs;
    if (ctx->staging->size < ctx->staging_offset + copy_size) {
        if (sync_staging) {
            // Create temporary larger buffer
            ggml_vk_ensure_sync_staging_buffer(ctx->device, copy_size);

            staging = ctx->device->sync_staging;
            staging_offset = 0;
        } else {
            GGML_ABORT("fatal error");
        }
    }

    VkBufferCopy buf_copy{ staging_offset, offset, copy_size };

    ggml_vk_sync_buffers(subctx);
    vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy);

    for (uint64_t i3 = 0; i3 < ne3; i3++) {
        for (uint64_t i2 = 0; i2 < ne2; i2++) {
            // Find longest contiguous slice
            if (ne1*nb1 == dstnb2) {
                deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
            } else {
                for (uint64_t i1 = 0; i1 < ne1; i1++) {
                    if (ne0*nb0/bs == dstnb1) {
                        deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
                    } else {
                        const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                        const uint64_t d_off = staging_offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                        for (uint64_t i0 = 0; i0 < ne0; i0++) {
                            deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
                        }
                    }
                }
            }
        }
    }
}
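
// Asynchronous 2D write: height rows of `width` bytes read with source pitch
// `spitch`, collapsed into a single copy when the rows are contiguous.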
static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, vk_buffer staging_buffer, size_t staging_offset, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset;
    ggml_vk_host_get(dst->device, src, buf, buf_offset);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices(1);
        if (width == spitch) {
            // Only do single write if stride is equal
            slices[0].srcOffset = buf_offset;
            slices[0].dstOffset = offset;
            slices[0].size = width * height;
        } else {
            slices.resize(height);
            for (size_t i = 0; i < height; i++) {
                slices[i].srcOffset = buf_offset + i * spitch;
                slices[i].dstOffset = offset + i * width;
                slices[i].size = width;
            }
        }

        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    // Staging buffer required
    const size_t copy_size = width*height;
    if (staging_buffer == nullptr || staging_buffer->size < staging_offset + copy_size) {
        if (sync_staging) {
            ggml_vk_ensure_sync_staging_buffer(dst->device, copy_size);

            staging_buffer = dst->device->sync_staging;
            staging_offset = 0;
        } else {
            GGML_ABORT("fatal error");
        }
    }

    VkBufferCopy buf_copy = {
        staging_offset,
        offset,
        copy_size};

    ggml_vk_sync_buffers(subctx);
    vkCmdCopyBuffer(subctx->s->buffer, staging_buffer->buffer, dst->buffer, 1, &buf_copy);

    if (width == spitch) {
        deferred_memcpy((uint8_t *)staging_buffer->ptr + staging_offset, src, width * height, &subctx->in_memcpys);
    } else {
        for (size_t i = 0; i < height; i++) {
            deferred_memcpy((uint8_t *)staging_buffer->ptr + staging_offset + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
        }
    }
}
static void ggml_vk_buffer_write_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, vk_buffer staging_buffer, size_t staging_offset, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_async(" << size << ")");
    return ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, size, size, 1, staging_buffer, staging_offset, sync_staging);
}

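// Synchronous 2D write. Host-visible, host-coherent destinations are written
// through the persistent mapping; device-local destinations go through a
// temporary transfer context that is submitted and fenced before returning.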
static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        for (size_t i = 0; i < height; i++) {
            memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
        }
    } else {
        vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue);
        ggml_vk_ctx_begin(dst->device, subctx);
        ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, spitch, width, height, nullptr, 0, true);
        ggml_vk_ctx_end(subctx);

        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(subctx, dst->device->fence);
        VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
        dst->device->device.resetFences({ dst->device->fence });
    }
}

static void ggml_vk_buffer_write(vk_buffer& dst, size_t offset, const void * src, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_write(" << size << ")");
    ggml_vk_buffer_write_2d(dst, offset, src, 0, size, 1);
}

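// Asynchronous 2D read from a device buffer. If `dst` is pinned host memory
// the copy targets it directly; otherwise the data lands in a staging buffer
// and a memcpy into `dst` is deferred onto subctx->out_memcpys, to be executed
// after the transfer has completed.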
static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, vk_buffer staging_buffer, size_t staging_offset, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
    GGML_ASSERT(width > 0);
    GGML_ASSERT(height > 0);
    GGML_ASSERT(src != nullptr);
    // TODO: staging_offset is not used

    // Check if dst is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset;
    ggml_vk_host_get(src->device, dst, buf, buf_offset);

    std::vector<vk::BufferCopy> slices(1);
    if (width == spitch && width == dpitch) {
        // Only do single write if stride is equal
        slices[0].srcOffset = offset;
        slices[0].dstOffset = buf_offset;
        slices[0].size = width * height;
    } else {
        slices.resize(height);
        for (size_t i = 0; i < height; i++) {
            slices[i].srcOffset = offset + i * spitch;
            slices[i].dstOffset = buf_offset + i * dpitch;
            slices[i].size = width;
        }
    }

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    // Fall back to staging buffer
    const size_t copy_size = dpitch * height;
    if (staging_buffer == nullptr || staging_buffer->size < staging_offset + copy_size) {
        if (sync_staging) {
            // Create temporary larger buffer
            ggml_vk_ensure_sync_staging_buffer(src->device, copy_size);

            staging_buffer = src->device->sync_staging;
        } else {
            GGML_ABORT("fatal error");
        }
    }

    ggml_vk_sync_buffers(subctx);
    subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices);

    deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys);
}

static void ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, vk_buffer staging_buffer, size_t staging_offset, bool sync_staging = false) {
    return ggml_vk_buffer_read_2d_async(subctx, src, offset, dst, size, size, size, 1, staging_buffer, staging_offset, sync_staging);
}

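// Synchronous read: host-visible sources are read straight through the
// mapping; otherwise submit a temporary transfer context, wait on the fence,
// then run the deferred staging-to-host memcpys.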
static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_read(" << src->buffer << ", " << offset << ", " << size << ")");
    if (src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        memcpy(dst, (uint8_t *) src->ptr + offset, size);
    } else {
        vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue);
        ggml_vk_ctx_begin(src->device, subctx);
        ggml_vk_buffer_read_async(subctx, src, offset, dst, size, nullptr, 0, true);
        ggml_vk_ctx_end(subctx);

        ggml_vk_submit(subctx, src->device->fence);
        VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
        src->device->device.resetFences({ src->device->fence });

        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
    }
}

static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_copy_async(" << size << ")");
    // Make sure both buffers are on same device
    GGML_ASSERT(src->device == dst->device);

    VkBufferCopy bc{ src_offset, dst_offset, size };

    vkCmdCopyBuffer(ctx->s->buffer, src->buffer, dst->buffer, 1, &bc);
}

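// Copy between two buffers. Within one device this is a single fenced
// submission; across devices the data is bounced through both devices'
// sync staging buffers with a host-side memcpy in between.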
static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    if (src->device == dst->device) {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
        // Copy within the device
        vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue);
        ggml_vk_ctx_begin(src->device, subctx);
        ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
        ggml_vk_ctx_end(subctx);
        ggml_vk_submit(subctx, src->device->fence);
        VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
        src->device->device.resetFences({ src->device->fence });
    } else {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")");
        // Copy device to device
        ggml_vk_ensure_sync_staging_buffer(src->device, size);
        ggml_vk_ensure_sync_staging_buffer(dst->device, size);

        // Copy to src staging buffer
        ggml_vk_buffer_copy(src->device->sync_staging, 0, src, src_offset, size);
        // memcpy to dst staging buffer
        memcpy(dst->device->sync_staging->ptr, src->device->sync_staging->ptr, size);
        // Copy to dst buffer
        ggml_vk_buffer_copy(dst, dst_offset, dst->device->sync_staging, 0, size);
    }
}

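// Fill a buffer region with a repeated 32-bit value (vkCmdFillBuffer
// semantics), submitted synchronously on the transfer queue.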
static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");

    vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue);
    ggml_vk_ctx_begin(dst->device, subctx);
    subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
    ggml_vk_ctx_end(subctx);

    ggml_vk_submit(subctx, dst->device->fence);
    VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_memset waitForFences");
    dst->device->device.resetFences({ dst->device->fence });
}

static uint32_t ggml_vk_guess_split_k(int m, int n, int k) {
    VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ")");
    // if (k > 128 && (m < 128 || n < 128) && m > 2 && n > 2) {
    //     return 4;
    // }

    return 1;

    GGML_UNUSED(m); GGML_UNUSED(n); GGML_UNUSED(k);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_amd(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
    if (m <= 32 || n <= 32) {
        return aligned ? mmp->a_s : mmp->s;
    }
    return aligned ? mmp->a_m : mmp->m;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_apple(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
    return aligned ? mmp->a_m : mmp->m;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_intel(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
    return aligned ? mmp->a_s : mmp->s;

    GGML_UNUSED(ctx);
}

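// Pick a matmul pipeline variant (small/medium/large shader, aligned or
// unaligned) from vendor-specific heuristics where available, otherwise from
// the output dimensions alone.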
static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ")");
    switch (ctx->device->vendor_id) {
    case VK_VENDOR_ID_AMD:
        return ggml_vk_guess_matmul_pipeline_amd(ctx, mmp, m, n, aligned);
    case VK_VENDOR_ID_APPLE:
        return ggml_vk_guess_matmul_pipeline_apple(ctx, mmp, aligned);
    case VK_VENDOR_ID_INTEL:
        return ggml_vk_guess_matmul_pipeline_intel(ctx, mmp, aligned);
    default:
        break;
    }

    if (m <= 32 || n <= 32) {
        return aligned ? mmp->a_s : mmp->s;
    }
    if (m <= 64 || n <= 64) {
        return aligned ? mmp->a_m : mmp->m;
    }
    return aligned ? mmp->a_l : mmp->l;
}

static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ")");
    return ggml_vk_guess_matmul_pipeline(ctx, mmp, m, n, true)->align;
}

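// Record a matmul dispatch. With split_k == 1 this is a single pipeline
// dispatch. Otherwise the K dimension is partitioned into split_k slices that
// write partial sums into split_k_buffer, followed by a second dispatch of
// pipeline_matmul_split_k_reduce that accumulates them into d.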
static void ggml_vk_matmul(
        ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3) {
    VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ")");
    ggml_vk_sync_buffers(subctx);
    if (split_k == 1) {
        const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3 };
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, sizeof(vk_mat_mat_push_constants), &pc, { m, n, batch });
        return;
    }

    GGML_ASSERT(batch_stride_d == m * n);

    const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3 };
    // Make sure enough workgroups get assigned for split k to work
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, sizeof(vk_mat_mat_push_constants), &pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch });
    ggml_vk_sync_buffers(subctx);
    const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2.size() * sizeof(uint32_t), pc2.data(), { m * n * batch, 1, 1 });
}

static void ggml_vk_matmul_id(
        ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11) {
    VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " <<
        "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " <<
        "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " <<
        "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")");
    ggml_vk_sync_buffers(subctx);
    const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d,
                                              nei0, nei1, nbi1, ne11 };
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, sizeof(vk_mat_mat_id_push_constants), &pc, { m, nei1, n_as });
}

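// True if dims 0 and 1 are laid out contiguously and dim 3 follows directly
// from dim 2's stride; nb[2] itself may carry padding, which the matmul
// shaders absorb through the batch strides.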
static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, ggml_type from, ggml_type to) {
    if (from == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
        return ctx->device->pipeline_cpy_f32_f32;
    }
    if (from == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
        return ctx->device->pipeline_cpy_f32_f16;
    }
    if (from == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
        return ctx->device->pipeline_cpy_f16_f16;
    }

    std::cerr << "Missing CPY op for types: " << ggml_type_name(from) << " " << ggml_type_name(to) << std::endl;
    GGML_ABORT("fatal error");
}

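// Dispatch a copy shader that gathers a (possibly strided) tensor into a
// tightly packed layout in `out`, converting the element type if the chosen
// pipeline does (e.g. f32 -> f16).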
static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out) {
    VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
    std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
    const int tensor_type_size = ggml_type_size(tensor->type);

    const uint32_t ne = ggml_nelements(tensor);

    const vk_op_unary_push_constants pc = {
        (uint32_t)ne,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1, (uint32_t)tensor->ne[0], (uint32_t)(tensor->ne[0] * tensor->ne[1]), (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
        0,
        0.0f, 0.0f,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, sizeof(vk_op_unary_push_constants), &pc, { ne, 1, 1 });
}

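// Matrix-matrix multiply with (possibly quantized) src0. Inputs that are
// non-contiguous or have no direct pipeline are first copied/dequantized into
// the preallocated f16 buffers, then the guessed matmul pipeline is recorded.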
static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;

    if (mmp == nullptr) {
        // Fall back to dequant + f16 mulmat
        mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, GGML_TYPE_F16, y_f32_kernel ? GGML_TYPE_F32 : GGML_TYPE_F16);
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const int x_ne = ne01 * ne00;
    const int y_ne = ne11 * ne10;
    const int d_ne = ne11 * ne01;

    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11));
    const bool aligned = ne10 == kpad && ne01 > 8 && ne11 > 8;

    const uint32_t split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);

    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx->device, pipeline, 1);
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
    }
    if (split_k > 1) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, 1);
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
    }
    if (y_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
        { d_D, d_buf_offset, d_sz * ne12 * ne13 }, { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k },
        ne01, ne11, ne10,
        ne10, ne10, ne01, stride_batch_x, stride_batch_y, ne20*ne21,
        split_k, ne12*ne13, ne02, ne12, r2, r3
    ); // NOLINT
}

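// Matrix-vector path (ne11 == 1): the dequantize_mul_mat_vec pipelines fuse
// dequantization of src0 with the dot product against src1, so quantized src0
// needs no separate dequant pass unless it is non-contiguous.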
static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne11 * ne01;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    // Allocate descriptor sets
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_1, y_non_contig ? 1 : ne12 * ne13);
    }
    ggml_pipeline_allocate_descriptor_sets(ctx->device, dmmv, ne12 * ne13);

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x /= groups_z;
    }

    // compute
    const vk_mat_vec_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        stride_batch_x, stride_batch_y, (uint32_t)(ne20*ne21),
        (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 }, vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23} },
        sizeof(vk_mat_vec_push_constants), &pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });
}

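// Specialized f16 x f32 matrix-vector kernel for permuted inputs (the "p021"
// layout, dims 1 and 2 swapped); both operands are used in place, with no
// dequant or copy passes.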
static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_p021_f16_f32(" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
    GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
    GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t x_ne = ne00 * ne01 * ne02;
    const uint64_t y_ne = ne10 * ne11 * ne12;
    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
    const uint64_t qx_buf_offset = extra_src0->offset + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, 1);

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;

    // compute
    const std::array<uint32_t, 6> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
}

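// Matrix-vector kernel for non-contiguous ("nc") f16 src0 with arbitrary row
// and channel strides, passed to the shader as row_stride_x/channel_stride_x.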
static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];

    // const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);

    const uint64_t qx_sz = ggml_nbytes(src0);
    const uint64_t qy_sz = ggml_nbytes(src1);
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
    const uint64_t qx_buf_offset = extra_src0->offset + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;

    // compute
    const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, (uint32_t)(ne12 / ne02), (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
        { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
}

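// Top-level mat-mul dispatch: pick one of the specialized f16 vector kernels
// when the layouts allow it, the generic dequant mat-vec path when dst has a
// single column, and the full matmul otherwise.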
static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")");
    if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && dst->ne[1] == 1) {
        ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, src0, src1, dst);
    } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1) {
        ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst);
    } else if (dst->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst);
    } else {
        ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst);
    }
}

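// Indirect matmul for mixture-of-experts: `ids` selects, per output row, which
// of the n_as (= ne02) expert matrices in src0 multiplies src1. Dequant/copy
// handling mirrors ggml_vk_mul_mat_q_f16, dispatched via the matmul_id
// pipelines.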
static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];
    GGML_ASSERT(nei0 * nei1 <= 3072);

    const uint32_t nbi1 = ids->nb[1];
    const uint32_t nbi2 = ids->nb[2];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t n_as = ne02;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
    ggml_tensor_extra_gpu * extra_ids = (ggml_tensor_extra_gpu *) ids->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;

    if (mmp == nullptr) {
        GGML_ABORT("fatal error");
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, nei1));
    const bool aligned = ne10 == kpad && ne01 > 8 && nei1 > 8;

    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, nei1, aligned);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = extra_ids->buffer_gpu.lock();
        ids_buf_offset = extra_ids->offset + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx->device, pipeline, 1);
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0,
            { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
    }
    if (y_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul_id(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
        { d_D, d_buf_offset, d_sz * ne22 * ne23 }, { d_ids, ids_buf_offset, ids_sz },
        ne01, ne21, ne10, ne10, ne10, ne01,
        stride_batch_x, stride_batch_y, ne20*ne21,
        n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11
    ); // NOLINT
}

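// Matrix-vector variant of the indirect matmul (nei1 == 1): one expert row
// selection per output, using the dequantize_mul_mat_vec_id pipelines.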
static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);  // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);  // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];

    const uint64_t nbi2 = ids->nb[2];

    GGML_ASSERT(nei1 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
    ggml_tensor_extra_gpu * extra_ids = (ggml_tensor_extra_gpu *) ids->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant);  // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = extra_ids->buffer_gpu.lock();
        ids_buf_offset = extra_ids->offset + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec_id(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr);  // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr);  // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    // Allocate descriptor sets
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, to_fp16_vk_1, y_non_contig ? 1 : ne12 * ne13);
    }
    ggml_pipeline_allocate_descriptor_sets(ctx->device, dmmv, ne12 * ne13);

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }
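
    // Vulkan caps the number of workgroups per dispatch dimension; when there
    // are more rows than maxComputeWorkGroupCount[0] allows, spill the extra
    // work into the z dimension.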
    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x /= groups_z;
    }

    // compute
    const vk_mat_vec_id_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        (uint32_t)x_ne, stride_batch_y, (uint32_t)(ne20*ne21),
        (uint32_t)nei0, (uint32_t)ne11,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 },
          vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23}, vk_subbuffer{ d_ids, ids_buf_offset, ids_sz } },
        sizeof(vk_mat_vec_id_push_constants), &pc, { groups_x, (uint32_t)nei0, groups_z });
}
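
// MUL_MAT_ID entry point: use the matrix-vector kernel when the id tensor has
// a single row (src2->ne[1] == 1) and src0 has a supported type; otherwise
// fall back to the full matrix-matrix path.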
static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id(" << src0 << ", " << src1 << ", " << src2 << ", " << dst << ")");
    if (src2->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_id_q_f16(ctx, subctx, src0, src1, src2, dst);
    } else {
        ggml_vk_mul_mat_id_q_f16(ctx, subctx, src0, src1, src2, dst);
    }
}
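
// REPEAT has no shader: the destination is assembled on-device from a batch of
// vkCmdCopyBuffer regions, one region per repeated source row.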
static void ggml_vk_op_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_op_repeat(" << src0 << ", " << src1 << ", " << dst << ")");
    const uint64_t ne0 = dst->ne[0];
    const uint64_t ne1 = dst->ne[1];
    const uint64_t ne2 = dst->ne[2];
    const uint64_t ne3 = dst->ne[3];

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t nb0 = dst->nb[0];
    const uint64_t nb1 = dst->nb[1];
    const uint64_t nb2 = dst->nb[2];
    const uint64_t nb3 = dst->nb[3];

    const uint64_t nb00 = src0->nb[0];
    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];
    const uint64_t nb03 = src0->nb[3];

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const uint64_t nr0 = ne0/ne00;
    const uint64_t nr1 = ne1/ne01;
    const uint64_t nr2 = ne2/ne02;
    const uint64_t nr3 = ne3/ne03;

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;

    const vk_buffer src_buf = extra_src0->buffer_gpu.lock();
    const uint64_t src_offset = extra_src0->offset + src0->view_offs;
    vk_buffer dst_buf = extra->buffer_gpu.lock();
    const uint64_t dst_offset = extra->offset + dst->view_offs;

    std::vector<vk::BufferCopy> copies;

    for (uint64_t i3 = 0; i3 < nr3; i3++) {
        for (uint64_t k3 = 0; k3 < ne03; k3++) {
            for (uint64_t i2 = 0; i2 < nr2; i2++) {
                for (uint64_t k2 = 0; k2 < ne02; k2++) {
                    for (uint64_t i1 = 0; i1 < nr1; i1++) {
                        for (uint64_t k1 = 0; k1 < ne01; k1++) {
                            for (uint64_t i0 = 0; i0 < nr0; i0++) {
                                copies.push_back({
                                    src_offset + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01,
                                    dst_offset + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0,
                                    ne00*nb0,
                                });
                            }
                        }
                    }
                }
            }
        }
    }

    ggml_vk_sync_buffers(subctx);
    subctx->s->buffer.copyBuffer(src_buf->buffer, dst_buf->buffer, copies);

    GGML_UNUSED(ctx);
    GGML_UNUSED(src1);
}
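
// Map a ggml op and its operand/destination types to the matching precompiled
// compute pipeline; returns nullptr when no shader covers the combination.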
static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op) {
    switch (op) {
    case GGML_OP_GET_ROWS:
        GGML_ASSERT(src1->type == GGML_TYPE_I32);
        if (dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_get_rows[src0->type];
        }
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_get_rows_f32[src0->type];
        }
        return nullptr;
    case GGML_OP_ADD:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_add_f32;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_add_f16_f32_f16;
        }
        return nullptr;
    case GGML_OP_MUL:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_mul_f32;
        }
        return nullptr;
    case GGML_OP_DIV:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_div_f32;
        }
        return nullptr;
    case GGML_OP_CONCAT:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_concat_f32;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_concat_f16;
        }
        if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_concat_i32;
        }
        return nullptr;
    case GGML_OP_UPSCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_upscale_f32;
        }
        return nullptr;
    case GGML_OP_SCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_scale_f32;
        }
        return nullptr;
    case GGML_OP_SQR:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqr_f32;
        }
        return nullptr;
    case GGML_OP_CLAMP:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_clamp_f32;
        }
        return nullptr;
    case GGML_OP_PAD:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_pad_f32;
        }
        return nullptr;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        return ggml_vk_get_cpy_pipeline(ctx, src0->type, dst->type);
    case GGML_OP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_norm_f32;
        }
        return nullptr;
    case GGML_OP_GROUP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_group_norm_f32;
        }
        return nullptr;
    case GGML_OP_RMS_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rms_norm_f32;
        }
        return nullptr;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(dst)) {
            case GGML_UNARY_OP_SILU:
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_silu_f32;
                }
                break;
            case GGML_UNARY_OP_GELU:
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_gelu_f32;
                }
                break;
            case GGML_UNARY_OP_GELU_QUICK:
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_gelu_quick_f32;
                }
                break;
            case GGML_UNARY_OP_RELU:
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_relu_f32;
                }
                break;
            case GGML_UNARY_OP_TANH:
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_tanh_f32;
                }
                break;
            default:
                break;
        }
        return nullptr;
    case GGML_OP_DIAG_MASK_INF:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_diag_mask_inf_f32;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX:
        GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);
        if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_f32;
        }
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_f32_f16;
        }
        return nullptr;
    case GGML_OP_ROPE:
        {
            const int mode = ((const int32_t *) dst->op_params)[2];
            const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;

            if (is_neox) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_neox_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f16;
                }
            } else {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_norm_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f16;
                }
            }
            return nullptr;
        }
    case GGML_OP_ARGSORT:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_argsort_f32;
        }
        return nullptr;
    case GGML_OP_SUM_ROWS:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sum_rows_f32;
        }
        return nullptr;
    case GGML_OP_IM2COL:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_im2col_f32;
        }
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_im2col_f32_f16;
        }
        return nullptr;
    case GGML_OP_TIMESTEP_EMBEDDING:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_timestep_embedding_f32;
        }
        return nullptr;
    case GGML_OP_LEAKY_RELU:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_leaky_relu_f32;
        }
        return nullptr;
    default:
        return nullptr;
    }

    GGML_UNUSED(src2);
}

static ggml_vk_func_t ggml_vk_op_get_func(ggml_op op) {
    switch(op) {
    case GGML_OP_REPEAT:
        return ggml_vk_op_repeat;
    default:
        return nullptr;
    }
}
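
// Ops whose shaders take full shape/stride information as push constants and
// can therefore operate on non-contiguous tensors in a single dispatch.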
static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
    switch (op) {
    case GGML_OP_CPY:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
        return true;
    default:
        return false;
    }
}
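
// Generic dispatch helper for the element-wise and row-wise ops: it resolves
// the pipeline for (op, types), locates the device buffers behind each tensor
// (or host-visible memory on UMA devices), clamps the bound ranges to the
// buffer sizes, picks the dispatch grid and pushes the caller-provided push
// constants. Ops without a pipeline fall back to a host-recorded handler such
// as ggml_vk_op_repeat.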
template<typename PC>
static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, const PC&& pc) {
    VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    if (src1 != nullptr) {
        std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    }
    if (src2 != nullptr) {
        std::cerr << "), (" << src2 << ", name=" << src2->name << ", type=" << src2->type << ", ne0=" << src2->ne[0] << ", ne1=" << src2->ne[1] << ", ne2=" << src2->ne[2] << ", ne3=" << src2->ne[3] << ", nb0=" << src2->nb[0] << ", nb1=" << src2->nb[1] << ", nb2=" << src2->nb[2] << ", nb3=" << src2->nb[3];
    }
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "), " << ggml_op_name(op) << ")");
    GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type))));  // NOLINT
    GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0));  // NOLINT
    GGML_ASSERT(dst->extra != nullptr);
    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];
    const uint64_t ne0 = ne00 * ne01;

    const bool use_src1 = src1 != nullptr;
    const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const uint64_t ne1 = ne10 * ne11;
    // const uint64_t nb10 = use_src1 ? src1->nb[0] : 0;

    const bool use_src2 = src2 != nullptr;
    const uint64_t ne20 = use_src2 ? src2->ne[0] : 0;
    const uint64_t ne21 = use_src2 ? src2->ne[1] : 0;
    const uint64_t ne22 = use_src2 ? src2->ne[2] : 0;
    const uint64_t ne23 = use_src2 ? src2->ne[3] : 0;
    const uint64_t ne2 = ne20 * ne21;

    const uint64_t ned0 = dst->ne[0];
    const uint64_t ned1 = dst->ne[1];
    const uint64_t ned2 = dst->ne[2];
    const uint64_t ned3 = dst->ne[3];
    const uint64_t ned = ned0 * ned1;

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);
    ggml_vk_func_t op_func;

    if (pipeline == nullptr) {
        op_func = ggml_vk_op_get_func(op);
        if (op_func == nullptr) {
            std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
            if (src1 != nullptr) {
                std::cerr << " and " << ggml_type_name(src1->type);
            }
            std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
            GGML_ABORT("fatal error");
        }

        op_func(ctx, subctx, src0, src1, dst);
        return;
    }

    const bool op_supports_incontiguous = ggml_vk_op_supports_incontiguous(op);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
    ggml_tensor_extra_gpu * extra_src2 = use_src2 ? (ggml_tensor_extra_gpu *) src2->extra : nullptr;

    vk_buffer d_X = nullptr;
    size_t x_buf_offset = 0;
    vk_buffer d_Y = nullptr;
    size_t y_buf_offset = 0;
    vk_buffer d_Z = nullptr;
    size_t z_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool src2_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_X, x_buf_offset);
        src0_uma = d_X != nullptr;
        if (use_src1) {
            ggml_vk_host_get(ctx->device, src1->data, d_Y, y_buf_offset);
            src1_uma = d_Y != nullptr;
        }
        if (use_src2) {
            ggml_vk_host_get(ctx->device, src2->data, d_Z, z_buf_offset);
            src2_uma = d_Z != nullptr;
        }
    }

    uint64_t x_sz = ggml_type_size(src0->type)/ggml_blck_size(src0->type) * ne0;
    uint64_t y_sz = use_src1 ? ggml_type_size(src1->type) * ne1 : 0;
    uint64_t z_sz = use_src2 ? ggml_type_size(src2->type) * ne2 : 0;
    uint64_t d_sz = ggml_type_size(dst->type) * ned;

    vk_buffer d_D = extra->buffer_gpu.lock();

    // Workaround for tiny tensor inputs on ROPE
    if (op == GGML_OP_ROPE && use_src1 && y_sz > d_D->size) {
        y_sz = VK_WHOLE_SIZE;
    }

    GGML_ASSERT(d_D != nullptr);
    uint64_t d_buf_offset = ((extra->offset + dst->view_offs) / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    GGML_ASSERT(d_buf_offset == extra->offset || op == GGML_OP_CPY);  // NOLINT
    if (!src0_uma) {
        d_X = extra_src0->buffer_gpu.lock();
        x_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_X != nullptr);
    }
    if (use_src1 && !src1_uma) {
        d_Y = extra_src1->buffer_gpu.lock();
        y_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Y != nullptr);
    }
    if (use_src2 && !src2_uma) {
        d_Z = extra_src2->buffer_gpu.lock();
        z_buf_offset = extra_src2->offset + src2->view_offs;
        GGML_ASSERT(d_Z != nullptr);
    }

    if (op_supports_incontiguous) {
        x_sz = ggml_nbytes(src0);
        y_sz = use_src1 ? ggml_nbytes(src1) : 0;
        z_sz = use_src2 ? ggml_nbytes(src2) : 0;
        d_sz = ggml_nbytes(dst);

        if (x_buf_offset + x_sz >= d_X->size) {
            x_sz = VK_WHOLE_SIZE;
        }
        if (use_src1 && y_buf_offset + y_sz >= d_Y->size) {
            y_sz = VK_WHOLE_SIZE;
        }
        if (use_src2 && z_buf_offset + z_sz >= d_Z->size) {
            z_sz = VK_WHOLE_SIZE;
        }
        if (d_buf_offset + d_sz >= d_D->size) {
            d_sz = VK_WHOLE_SIZE;
        }
    }

    std::array<uint32_t, 3> elements;

    // Single call if dimension 2 is contiguous
    if (op_supports_incontiguous || (ggml_is_contiguous(src0) && (src1 == nullptr || ggml_is_contiguous(src1)))) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, pipeline, 1);

        switch (op) {
        case GGML_OP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SUM_ROWS:
            {
                const uint32_t nr = ggml_nrows(src0);
                if (nr > 262144) {
                    elements = { 512, 512, CEIL_DIV(nr, 262144) };
                } else if (nr > 512) {
                    elements = { 512, CEIL_DIV(nr, 512), 1 };
                } else {
                    elements = { nr, 1, 1 };
                }
            } break;
        case GGML_OP_GROUP_NORM:
            {
                const uint32_t num_groups = dst->op_params[0];
                elements = { num_groups * (uint32_t)src0->ne[3], 1, 1 };
            } break;
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ROPE:
            elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
            break;
        case GGML_OP_GET_ROWS:
            elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
            break;
        case GGML_OP_ARGSORT:
            elements = { (uint32_t)ne00, (uint32_t)ggml_nrows(src0), 1 };
            break;
        case GGML_OP_IM2COL:
            {
                const bool is_2D = dst->op_params[6] == 1;

                const uint32_t IC = src1->ne[is_2D ? 2 : 1];

                const uint32_t KH = is_2D ? src0->ne[1] : 1;
                const uint32_t KW = src0->ne[0];

                const uint32_t OH = is_2D ? dst->ne[2] : 1;
                const uint32_t OW = dst->ne[1];

                const uint32_t batch = src1->ne[3];

                elements = { OW * KW * KH, OH, batch * IC };
            } break;
        case GGML_OP_TIMESTEP_EMBEDDING:
            {
                const uint32_t dim = dst->op_params[0];
                uint32_t half_ceil = (dim + 1) / 2;
                elements = { half_ceil, (uint32_t)src0->ne[0], 1 };
            } break;
        case GGML_OP_ADD:
        case GGML_OP_DIV:
        case GGML_OP_MUL:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_CLAMP:
        case GGML_OP_PAD:
        case GGML_OP_CPY:
        case GGML_OP_CONCAT:
        case GGML_OP_UPSCALE:
        case GGML_OP_UNARY:
            {
                const uint32_t ne = ggml_nelements(dst);
                if (ne > 262144) {
                    elements = { 512, 512, CEIL_DIV(ne, 262144) };
                } else if (ne > 512) {
                    elements = { 512, CEIL_DIV(ne, 512), 1 };
                } else {
                    elements = { ne, 1, 1 };
                }
            } break;
        default:
            elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
            break;
        }

        if (!op_supports_incontiguous) {
            if (x_sz != VK_WHOLE_SIZE) {
                x_sz *= ne02 * ne03;
            }
            if (use_src1 && y_sz != VK_WHOLE_SIZE) {
                y_sz *= ne12 * ne13;
            }
            if (use_src2 && z_sz != VK_WHOLE_SIZE) {
                z_sz *= ne22 * ne23;
            }
            if (d_sz != VK_WHOLE_SIZE) {
                d_sz *= ned2 * ned3;
            }
        }

        if (op == GGML_OP_SOFT_MAX) {
            // Empty src1 is possible in soft_max, but the shader needs a buffer
            vk_subbuffer subbuf_y;
            if (use_src1) {
                subbuf_y = { d_Y, y_buf_offset, y_sz };
            } else {
                subbuf_y = { d_X, 0, x_sz };
            }

            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, subbuf_y, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (op == GGML_OP_ROPE) {
            // Empty src2 is possible in rope, but the shader needs a buffer
            vk_subbuffer subbuf_z;
            if (use_src2) {
                subbuf_z = { d_Z, z_buf_offset, z_sz };
            } else {
                subbuf_z = { d_X, 0, x_sz };
            }

            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, subbuf_z, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (op == GGML_OP_IM2COL) {
            // im2col uses only src1 and dst buffers
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (use_src2) {
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (use_src1) {
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else {
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        }
    } else {
        GGML_ASSERT(op != GGML_OP_SOFT_MAX);
        GGML_ASSERT(op != GGML_OP_ARGSORT);
        GGML_ASSERT(!use_src2);

        ggml_pipeline_allocate_descriptor_sets(ctx->device, pipeline, ne02 * ne03);

        switch (op) {
        case GGML_OP_NORM:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_RMS_NORM:
            elements = { (uint32_t)ne01, 1, 1 };
            break;
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ROPE:
            elements = { (uint32_t)ne01, (uint32_t)ne00, 1 };
            break;
        case GGML_OP_GET_ROWS:
            elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
            break;
        default:
            elements = { (uint32_t)ne0, 1, 1 };
            break;
        }

        for (uint64_t i03 = 0; i03 < ne03; i03++) {
            for (uint64_t i02 = 0; i02 < ne02; i02++) {
                const uint32_t it_idx0 = (i03 * ne02 + i02);
                const uint32_t it_idx1 = use_src1 ? ((i03 % ne13) * ne12 + (i02 % ne12)) : 0;
                const uint32_t x_offset = x_sz * it_idx0;
                const uint32_t y_offset = y_sz * it_idx1;
                const uint32_t d_offset = d_sz * it_idx0;

                if (use_src1) {
                    ggml_vk_sync_buffers(subctx);
                    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset + x_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset + y_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
                } else {
                    ggml_vk_sync_buffers(subctx);
                    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset + x_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
                }
            }
        }
    }
}
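
// The functions below are thin wrappers around ggml_vk_op_f32: each one packs
// its op's shape, stride and parameter data into the push-constant struct that
// the corresponding shader expects. As an illustrative sketch (not part of
// upstream ggml), a wrapper for a hypothetical unary op GGML_OP_FOO with an
// f32 shader would follow the same template:
//
//     static void ggml_vk_foo(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
//         const uint32_t src0_type_size = ggml_type_size(src0->type);
//         const uint32_t dst_type_size  = ggml_type_size(dst->type);
//         // same layout as vk_op_unary_push_constants: element count, shapes,
//         // element strides, destination offset and two float parameters
//         ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_FOO, {
//             (uint32_t)ggml_nelements(src0),
//             (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
//             (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
//             0,
//             0.0f, 0.0f,
//         });
//     }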
static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_REPEAT, {});
}

static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_GET_ROWS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_ADD, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_MUL, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_DIV, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    int * op_params = (int *)dst->op_params;

    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONCAT, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, op_params[0],
    });
}

static void ggml_vk_upscale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);

    const float sf0 = (float)dst->ne[0] / src0->ne[0];
    const float sf1 = (float)dst->ne[1] / src0->ne[1];
    const float sf2 = (float)dst->ne[2] / src0->ne[2];
    const float sf3 = (float)dst->ne[3] / src0->ne[3];

    ggml_vk_op_f32<vk_op_upscale_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UPSCALE, {
        (uint32_t)ggml_nelements(dst), 0,
        (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)dst->ne[0], (uint32_t)dst->ne[1], (uint32_t)dst->ne[2],(uint32_t)dst->ne[3],
        sf0, sf1, sf2, sf3,
    });
}

static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SCALE, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], 0.0f
    });
}

static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}

static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CLAMP, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], op_params[1],
    });
}

static void ggml_vk_pad(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_PAD, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}

static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);
    const uint32_t d_offset = ((extra->offset + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CPY, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        d_offset,
        0.0f, 0.0f,
    });
}

static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    int * op_params = (int *)dst->op_params;

    uint32_t num_groups = op_params[0];
    uint32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
    static const float eps = 1e-6f;

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f });
}

static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
}

static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    int32_t * op_params = (int32_t *)dst->op_params;

    ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] });
}
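
// SOFT_MAX also carries the ALiBi parameters: m0 and m1 are the per-head slope
// bases derived from max_bias and the power-of-two head count, mirroring the
// CPU backend's computation.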
static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;

    float scale = op_params[0];
    float max_bias = op_params[1];

    const uint32_t ncols   = (uint32_t)src0->ne[0];
    const uint32_t nrows_x = (uint32_t)ggml_nrows(src0);
    const uint32_t nrows_y = (uint32_t)src0->ne[1];

    const uint32_t n_head_kv   = nrows_x/nrows_y;
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_SOFT_MAX, {
        ncols,
        src1 != nullptr ? nrows_y : (uint32_t)0,
        scale, max_bias,
        m0, m1,
        n_head_log2,
    });
}
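
// ROPE reads its parameters from dst->op_params (same layout as the CPU
// backend) and precomputes the YaRN correction dims and theta scale for the
// shader.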
static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    const int n_dims        = ((int32_t *) dst->op_params)[1];
    // const int mode       = ((int32_t *) dst->op_params)[2];
    // const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_ctx_orig    = ((int32_t *) dst->op_params)[4];
    const float freq_base   = ((float *) dst->op_params)[5];
    const float freq_scale  = ((float *) dst->op_params)[6];
    const float ext_factor  = ((float *) dst->op_params)[7];
    const float attn_factor = ((float *) dst->op_params)[8];
    const float beta_fast   = ((float *) dst->op_params)[9];
    const float beta_slow   = ((float *) dst->op_params)[10];

    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, src2, dst, GGML_OP_ROPE, {
        (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1],
        freq_base, ext_factor, attn_factor, {corr_dims[0], corr_dims[1]}, theta_scale,
        src2 != nullptr,
    });
}
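
// The argsort shader sorts a whole row within one workgroup, so the column
// count is padded up to the next power of two and capped at 1024.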
static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    int32_t * op_params = (int32_t *)dst->op_params;

    uint32_t ncols = src0->ne[0];

    uint32_t ncols_pad = 1;
    while (ncols_pad < ncols) {
        ncols_pad *= 2;
    }

    GGML_ASSERT(ncols_pad <= 1024);

    ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
        ncols,
        ncols_pad,
        op_params[0],
    });
}

static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f });
}
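
// im2col: the strides taken from src1->nb are byte offsets, so they are
// divided by sizeof(float) to get element offsets for the shader.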
static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const int32_t s0 = dst->op_params[0];
    const int32_t s1 = dst->op_params[1];
    const int32_t p0 = dst->op_params[2];
    const int32_t p1 = dst->op_params[3];
    const int32_t d0 = dst->op_params[4];
    const int32_t d1 = dst->op_params[5];

    const bool is_2D = dst->op_params[6] == 1;

    const uint32_t IC = src1->ne[is_2D ? 2 : 1];
    const uint32_t IH = is_2D ? src1->ne[1] : 1;
    const uint32_t IW =         src1->ne[0];

    const uint32_t KH = is_2D ? src0->ne[1] : 1;
    const uint32_t KW =         src0->ne[0];

    const uint32_t OH = is_2D ? dst->ne[2] : 1;
    const uint32_t OW =         dst->ne[1];

    const uint32_t offset_delta = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
    const uint32_t batch_offset = src1->nb[3] / 4; // nb is byte offset, src is type float32

    const uint32_t pelements = OW * KW * KH;

    ggml_vk_op_f32<vk_op_im2col_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_IM2COL, {
        batch_offset, offset_delta,
        IC, IW, IH, OW, OH, KW, KH,
        pelements,
        IC * KH * KW,
        s0, s1, p0, p1, d0, d1,
    });
}

static void ggml_vk_timestep_embedding(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t dim = dst->op_params[0];
    const uint32_t max_period = dst->op_params[1];
    const uint32_t nb1 = dst->nb[1] / ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_timestep_embedding_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_TIMESTEP_EMBEDDING, {
        nb1, dim, max_period,
    });
}

static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const float * op_params = (const float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f });
}
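
// Self-test and benchmark helpers, only compiled with GGML_VULKAN_RUN_TESTS:
// they fill matrices with synthetic data, run the matmul pipelines and time
// the submission.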
#ifdef GGML_VULKAN_RUN_TESTS
static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
    if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);

    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
                float val;
                if (type == GGML_TYPE_F32) {
                    val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
                } else if (type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
  4044. }
  4045. }
  4046. fprintf(stderr, "\n");
  4047. }
  4048. }
  4049. template <typename X_TYPE, typename Y_TYPE>
  4050. static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
  4051. VK_LOG_DEBUG("ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")");
  4052. const size_t x_ne = m * k * batch;
  4053. const size_t y_ne = k * n * batch;
  4054. const size_t d_ne = m * n * batch;
  4055. vk_pipeline p;
  4056. std::string shname;
  4057. if (shader_size == 0) {
  4058. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  4059. p = ctx->device->pipeline_matmul_f32->a_s;
  4060. shname = "F32_ALIGNED_S";
  4061. } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  4062. p = ctx->device->pipeline_matmul_f32_f16->a_s;
  4063. shname = "F32_F16_ALIGNED_S";
  4064. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  4065. p = ctx->device->pipeline_matmul_f16_f32->a_s;
  4066. shname = "F16_F32_ALIGNED_S";
  4067. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  4068. p = ctx->device->pipeline_matmul_f16->a_s;
  4069. shname = "F16_ALIGNED_S";
  4070. } else {
  4071. GGML_ABORT("fatal error");
  4072. }
  4073. } else if (shader_size == 1) {
  4074. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  4075. p = ctx->device->pipeline_matmul_f32->a_m;
  4076. shname = "F32_ALIGNED_M";
  4077. } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  4078. p = ctx->device->pipeline_matmul_f32_f16->a_m;
  4079. shname = "F32_F16_ALIGNED_M";
  4080. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  4081. p = ctx->device->pipeline_matmul_f16_f32->a_m;
  4082. shname = "F16_F32_ALIGNED_M";
  4083. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  4084. p = ctx->device->pipeline_matmul_f16->a_m;
  4085. shname = "F16_ALIGNED_M";
  4086. } else {
  4087. GGML_ABORT("fatal error");
  4088. }
  4089. } else if (shader_size == 2) {
  4090. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  4091. p = ctx->device->pipeline_matmul_f32->a_l;
  4092. shname = "F32_ALIGNED_L";
  4093. } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  4094. p = ctx->device->pipeline_matmul_f32_f16->a_l;
  4095. shname = "F32_F16_ALIGNED_L";
  4096. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  4097. p = ctx->device->pipeline_matmul_f16_f32->a_l;
  4098. shname = "F16_F32_ALIGNED_L";
  4099. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  4100. p = ctx->device->pipeline_matmul_f16->a_l;
  4101. shname = "F16_ALIGNED_L";
  4102. } else {
  4103. GGML_ABORT("fatal error");
  4104. }
  4105. } else {
  4106. GGML_ASSERT(0);
  4107. }
    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->s;
                shname = "F32_S";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->s;
                shname = "F32_F16_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->s;
                shname = "F16_F32_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->s;
                shname = "F16_S";
            }
        } else if (shader_size == 1) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->m;
                shname = "F32_M";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->m;
                shname = "F32_F16_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->m;
                shname = "F16_F32_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->m;
                shname = "F16_M";
            }
        } else if (shader_size == 2) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->l;
                shname = "F32_L";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->l;
                shname = "F32_F16_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->l;
                shname = "F16_F32_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->l;
                shname = "F16_L";
            }
        }
    }

    ggml_pipeline_allocate_descriptor_sets(ctx->device, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    }

    vk_buffer d_X = ggml_vk_create_buffer_check(ctx->device, sizeof(X_TYPE) * x_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_Y = ggml_vk_create_buffer_check(ctx->device, sizeof(Y_TYPE) * y_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_D = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);

    X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
    Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
    float* d = (float *) malloc(sizeof(float) * d_ne);

    for (size_t i = 0; i < x_ne; i++) {
        if (std::is_same<float, X_TYPE>()) {
            x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
            x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
        } else {
            GGML_ABORT("fatal error");
        }
    }
    for (size_t i = 0; i < y_ne; i++) {
        if (std::is_same<float, Y_TYPE>()) {
            // y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            y[i] = (i % k == i / k) ? 1.0f : 0.0f;
        } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
            // y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
        } else {
            GGML_ABORT("fatal error");
        }
    }

    ggml_vk_buffer_write(d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
    ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_ctx_begin(ctx->device, subctx);
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1
        );
        ggml_vk_ctx_end(subctx);
    }

    auto begin = std::chrono::high_resolution_clock::now();
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();
    double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    // copy dst to host
    ggml_vk_buffer_read(d_D, 0, d, sizeof(float) * d_ne);

    float * d_chk = (float *) malloc(sizeof(float) * d_ne);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_type src0_type;
    ggml_type src1_type;

    if (std::is_same<float, X_TYPE>()) {
        src0_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
        src0_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }
    if (std::is_same<float, Y_TYPE>()) {
        src1_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
        src1_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = x;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;
        if (err > 0.05f && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }
    // normalize over every element that was summed, including the batch dimension
    avg_err /= m * n * batch;
    std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n + 15, first_err_b);
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    free(d_chk);

    ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue);
    ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue);

    ggml_vk_destroy_buffer(d_X);
    ggml_vk_destroy_buffer(d_Y);
    ggml_vk_destroy_buffer(d_D);

    ggml_pipeline_cleanup(p);
    ggml_pipeline_cleanup(ctx->device->pipeline_matmul_split_k_reduce);

    free(x);
    free(y);
    free(d);
}
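
// Debug helper: prints a 10x10 window of tensor values centered on (i0, i1) in
// the plane selected by (i2, i3). Only F32 and F16 tensors are handled; other
// types are silently skipped.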
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
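
// CPU-side reference helpers for the tests below: quantize an F32 buffer into
// `quant` blocks, and dequantize such a buffer back to F32 via the type's
// to_float trait.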
static void ggml_vk_quantize_data(const float * from, void * to, size_t ne, ggml_type quant) {
    ggml_quantize_chunk(quant, from, to, 0, 1, ne, nullptr);
}

static void ggml_vk_dequantize_data(const void * from, float * to, size_t ne, ggml_type quant) {
    if (quant == GGML_TYPE_F32) {
        memcpy(to, from, sizeof(float) * ne);
        return;
    }

    ggml_type_traits_t tt = ggml_internal_get_type_traits(quant);

    ggml_to_float_t dequant_fn = tt.to_float;

    dequant_fn(from, to, ne);
}
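
// Round-trip test for the to_fp16 dequant shaders: quantize random data on the
// CPU, dequantize it to FP16 on the GPU, and compare against the CPU
// dequantization reference.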
static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant(" << ne << ")");
    const size_t x_sz = sizeof(float) * ne;
    const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
    const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
    float * x = (float *) malloc(x_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz_f16, vk::MemoryPropertyFlagBits::eDeviceLocal);
    float * x_ref = (float *) malloc(x_sz);
    ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);

    for (size_t i = 0; i < ne; i++) {
        x[i] = rand() / (float)RAND_MAX;
    }

    vk_pipeline p = ggml_vk_get_to_fp16(ctx, quant);

    ggml_vk_quantize_data(x, qx, ne, quant);
    ggml_vk_dequantize_data(qx, x_ref, ne, quant);

    ggml_pipeline_allocate_descriptor_sets(ctx->device, p, 1);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    ggml_vk_ctx_begin(ctx->device, subctx);
    const std::vector<uint32_t> pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne };
    ggml_vk_dispatch_pipeline(ctx, subctx, p, { { qx_buf, 0, qx_sz }, { x_buf, 0, x_sz_f16 } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)ne, 1, 1});
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();

    double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(x_buf, 0, x_chk, x_sz_f16);

    int first_err = -1;

    double avg_err = 0.0;
    for (size_t i = 0; i < ne; i++) {
        double error = std::fabs(x_ref[i] - ggml_fp16_to_fp32(x_chk[i]));
        avg_err += error;

        if (first_err < 0 && error > 0.05) {
            first_err = i;
        }
    }

    avg_err /= ne;

    std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "first_error = " << first_err << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << ggml_fp16_to_fp32(x_chk[i]) << ", ";
        }
        std::cerr << std::endl << "Expected result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << x_ref[i] << ", ";
        }
        std::cerr << std::endl;
    }

    ggml_vk_destroy_buffer(x_buf);
    ggml_vk_destroy_buffer(qx_buf);

    free(x);
    free(qx);
    free(x_ref);
    free(x_chk);
}
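
// Same idea as ggml_vk_test_matmul, but with a quantized src0: runs the
// dequant-matmul pipeline for `quant` and compares against a CPU ggml
// reference computation.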
static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, size_t split_k, size_t shader_size, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << ggml_type_name(quant) << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_s;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S";
    } else if (shader_size == 1) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_m;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M";
    } else if (shader_size == 2) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_l;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L";
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->s;
            shname = std::string(ggml_type_name(quant)) + "_S";
        } else if (shader_size == 1) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->m;
            shname = std::string(ggml_type_name(quant)) + "_M";
        } else if (shader_size == 2) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->l;
            shname = std::string(ggml_type_name(quant)) + "_L";
        } else {
            GGML_ASSERT(0);
        }
    }
    const size_t x_sz = sizeof(float) * x_ne;
    const size_t y_sz = sizeof(float) * y_ne;
    const size_t qx_sz = x_ne * ggml_type_size(quant)/ggml_blck_size(quant);
    const size_t d_sz = sizeof(float) * d_ne;
    float * x = (float *) malloc(x_sz);
    float * y = (float *) malloc(y_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer y_buf = ggml_vk_create_buffer_check(ctx->device, y_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_buf = ggml_vk_create_buffer_check(ctx->device, d_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    float * d = (float *) malloc(d_sz);
    float * d_chk = (float *) malloc(d_sz);

    for (size_t i = 0; i < x_ne; i++) {
        x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
    }

    ggml_vk_quantize_data(x, qx, x_ne, quant);

    for (size_t i = 0; i < y_ne; i++) {
        // y[i] = rand() / (float)RAND_MAX;
        y[i] = (i % k == i / k) ? 1.0f : 0.0f;
    }

    ggml_pipeline_allocate_descriptor_sets(ctx->device, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_allocate_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    }

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);
    ggml_vk_buffer_write(y_buf, 0, y, y_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_ctx_begin(ctx->device, subctx);
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(qx_buf), ggml_vk_subbuffer(y_buf), ggml_vk_subbuffer(d_buf), ggml_vk_subbuffer(ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1
        );
        ggml_vk_ctx_end(subctx);
    }

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();

    double time_ms = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(d_buf, 0, d, d_sz);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, quant, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, GGML_TYPE_F32, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = qx;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;
        if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }
    // normalize over every element that was summed, including the batch dimension
    avg_err /= m * n * batch;
    std::cerr << "TEST MMQ " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time_ms / num_it << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.01 || std::isnan(avg_err)) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    ggml_vk_destroy_buffer(qx_buf);
    ggml_vk_destroy_buffer(y_buf);
    ggml_vk_destroy_buffer(d_buf);

    free(x);
    free(qx);
    free(y);
    free(d);
    free(d_chk);
}
#endif
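
// Allocate a fresh GPU extra for a tensor and attach it.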
static ggml_tensor_extra_gpu * ggml_vk_tensor_create_extra(ggml_tensor * tensor) {
    VK_LOG_DEBUG("ggml_vk_tensor_create_extra(" << tensor << " (" << tensor->name << ", " << ggml_op_name(tensor->op) << "))");
    ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu;
    extra->reset();
    tensor->extra = extra;
    return extra;
}
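
// Inspect a graph node and grow the recorded sizes of the preallocated
// x/y/split_k/staging buffers as needed; the buffers themselves are
// (re)allocated later in ggml_vk_preallocate_buffers.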
static void ggml_vk_preallocate_buffers_graph(ggml_backend_vk_context * ctx, ggml_tensor * node){
    VK_LOG_DEBUG("ggml_vk_preallocate_buffers_graph(" << node << ")");
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;

    if (extra == nullptr) {
        return;
    }

    ggml_tensor * src0 = node->src[0];
    ggml_tensor * src1 = node->src[1];

    const bool use_src0 = src0 != nullptr;
    const int64_t ne00 = use_src0 ? src0->ne[0] : 0;
    const int64_t ne01 = use_src0 ? src0->ne[1] : 0;
    const int64_t ne02 = use_src0 ? src0->ne[2] : 0;
    const int64_t ne03 = use_src0 ? src0->ne[3] : 0;
    const bool use_src1 = src1 != nullptr && node->op != GGML_OP_CPY && node->op != GGML_OP_CONT && node->op != GGML_OP_DUP;
    const int64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const int64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const int64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const int64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const int64_t ne20 = node->ne[0];
    const int64_t ne21 = node->ne[1];
    const int64_t ne22 = node->ne[2];
    const int64_t ne23 = node->ne[3];

    const ggml_type src0_type = (use_src0 && src0->type == GGML_TYPE_F32) ? src0->type : GGML_TYPE_F16;
    const ggml_type src1_type = (use_src1 && src1->type == GGML_TYPE_F32) ? src1->type : GGML_TYPE_F16;

    const bool x_non_contig = use_src0 && !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = use_src1 && !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = use_src1 && src1->type == GGML_TYPE_F32 && !y_non_contig;

    bool mmp = (use_src0 && use_src1 && (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID)) ? ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type) != nullptr : false;

    const bool qx_needs_dequant = use_src0 && (!mmp || x_non_contig);
    const bool qy_needs_dequant = use_src1 && ((src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig);

    int split_k;
    if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
        split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
    } else {
        split_k = 1;
    }
    const uint32_t x_ne = ne00 * ne01;
    const uint32_t y_ne = ne10 * ne11;
    const uint32_t d_ne = ne20 * ne21;

    const uint64_t x_sz = (use_src0 && qx_needs_dequant) ? ggml_vk_align_size(sizeof(src0_type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ne02 * ne03 : 0;
    const uint64_t y_sz = (use_src1 && qy_needs_dequant) ? ggml_vk_align_size(sizeof(src1_type) * y_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ne12 * ne13 : 0;
    uint64_t d_sz = ggml_vk_align_size(ggml_type_size(node->type) * d_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ne22 * ne23;
    const uint64_t split_k_size = split_k > 1 ? d_sz * 4 : 0;
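    // Note: the split_k scratch is sized for 4 partial result matrices
    // (d_sz * 4); this assumes ggml_vk_guess_split_k never returns more than 4.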
    if (extra->buffer_gpu.expired()) {
        // Workaround for CPU backend BLAS matmul calls
        extra->buffer_gpu = ggml_vk_create_buffer_temp(ctx, d_sz);
    }

    switch (node->op) {
    case GGML_OP_REPEAT:
    case GGML_OP_GET_ROWS:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_ADD:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_IM2COL:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_LEAKY_RELU:
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            break;
        default:
            return;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
        if (
                x_sz > ctx->device->max_memory_allocation_size ||
                y_sz > ctx->device->max_memory_allocation_size ||
                d_sz > ctx->device->max_memory_allocation_size ||
                split_k_size > ctx->device->max_memory_allocation_size) {
            GGML_ABORT("Requested preallocation size is too large");
        }

        if (ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
        }
        if (ctx->prealloc_size_y < y_sz) {
            ctx->prealloc_size_y = y_sz;
        }
        if (ctx->prealloc_size_split_k < split_k_size) {
            ctx->prealloc_size_split_k = split_k_size;
        }
        if (ctx->staging_size < x_sz + y_sz) {
            ctx->staging_size = x_sz + y_sz;
        }
        break;
    default:
        return;
    }
}
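
// (Re)allocate the preallocated buffers to the sizes collected by
// ggml_vk_preallocate_buffers_graph. With GGML_VULKAN_RUN_TESTS defined, this
// instead runs the shader test suite below and aborts.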
static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
#if defined(GGML_VULKAN_RUN_TESTS)
    ctx->staging = ggml_vk_create_buffer_check(ctx->device, 100ul * 1024ul * 1024ul,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_F32);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_IQ4_NL);

    ggml_vk_test_matmul<ggml_fp16_t, ggml_fp16_t>(ctx, 512, 512, 100, 32, 100, 1, 2);

    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 0);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 1);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 2);
    // ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 0);
    // ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 1);
    // ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 2);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_1);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_1);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q8_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q8_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q8_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q8_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q2_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q2_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q2_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q2_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q3_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q3_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q3_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q3_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q6_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q6_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q6_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q6_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_IQ4_NL);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_IQ4_NL);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_IQ4_NL);

    std::cerr << std::endl;

    const std::vector<size_t> vals {
        8, 8, 8,
        100, 46, 576,
        623, 111, 128,
        100, 46, 558,
        512, 1, 256,
        128, 110, 622,
        511, 511, 127,
        511, 511, 7,
        511, 511, 17,
        49, 49, 128,
        128, 49, 49,
        4096, 49, 4096,
        11008, 49, 4096,
        4096, 49, 11008,
        32000, 49, 4096,
        512, 512, 128,
        128, 512, 512,
        4096, 512, 4096,
        11008, 512, 4096,
        4096, 512, 11008,
        32000, 512, 4096,
    };
    const size_t num_it = 1;
    for (size_t i = 0; i < vals.size(); i += 3) {
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
        // ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
        // ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
        // ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
        std::cerr << std::endl;
    }

    GGML_ABORT("fatal error");
#endif

    if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(x_size: " << ctx->prealloc_size_x << ")");
        // Resize buffer
        if (ctx->prealloc_x != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_x);
        }
        ctx->prealloc_x = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_x);
    }
    if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(y_size: " << ctx->prealloc_size_y << ")");
        // Resize buffer
        if (ctx->prealloc_y != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_y);
        }
        ctx->prealloc_y = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_y);
    }
    if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(split_k_size: " << ctx->prealloc_size_split_k << ")");
        // Resize buffer
        if (ctx->prealloc_split_k != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_split_k);
        }
        ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_split_k);
    }
    if (ctx->staging == nullptr || (ctx->staging_size > 0 && ctx->staging->size < ctx->staging_size)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(staging_size: " << ctx->staging_size << ")");
        // Resize buffer
        if (ctx->staging != nullptr) {
            ggml_vk_destroy_buffer(ctx->staging);
        }
        ctx->staging = ggml_vk_create_buffer_check(ctx->device, ctx->staging_size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}
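
// Record one graph node into the current compute context, creating the context
// on demand; the context is ended and marked with its exit tensor once the last
// node is reached, and submission happens later in ggml_vk_compute_forward.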
static void ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, int node_idx, bool last_node){
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;

    if (ggml_is_empty(node) || extra == nullptr) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")");
    ctx->semaphore_idx = 0;
    ctx->staging_offset = 0;

    const ggml_tensor * src0 = node->src[0];
    const ggml_tensor * src1 = node->src[1];
    const ggml_tensor * src2 = node->src[2];

    switch (node->op) {
    // Return on empty ops to avoid generating a compute_ctx and setting exit_tensor
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
        return;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            break;
        default:
            return;
        }
        break;
    case GGML_OP_REPEAT:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_IM2COL:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_LEAKY_RELU:
        break;
    default:
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl;
        GGML_ABORT("fatal error");
        return;
    }

    vk_context compute_ctx;

    if (ctx->compute_ctx.expired()) {
        compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
        ctx->compute_ctx = compute_ctx;
        ggml_vk_ctx_begin(ctx->device, compute_ctx);
    } else {
        compute_ctx = ctx->compute_ctx.lock();
    }

    switch (node->op) {
    case GGML_OP_REPEAT:
        ggml_vk_repeat(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_GET_ROWS:
        ggml_vk_get_rows(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ADD:
        ggml_vk_add(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_MUL:
        ggml_vk_mul(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_DIV:
        ggml_vk_div(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_CONCAT:
        ggml_vk_concat(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_UPSCALE:
        ggml_vk_upscale(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SCALE:
        ggml_vk_scale(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SQR:
        ggml_vk_sqr(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CLAMP:
        ggml_vk_clamp(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_PAD:
        ggml_vk_pad(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        ggml_vk_cpy(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_NORM:
        ggml_vk_norm(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_GROUP_NORM:
        ggml_vk_group_norm(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_RMS_NORM:
        ggml_vk_rms_norm(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            ggml_vk_unary(ctx, compute_ctx, src0, node);
            break;
        default:
            return;
        }
        break;
    case GGML_OP_DIAG_MASK_INF:
        ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SOFT_MAX:
        ggml_vk_soft_max(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ROPE:
        ggml_vk_rope(ctx, compute_ctx, src0, src1, src2, node);
        break;
    case GGML_OP_ARGSORT:
        ggml_vk_argsort(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SUM_ROWS:
        ggml_vk_sum_rows(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_IM2COL:
        ggml_vk_im2col(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        ggml_vk_timestep_embedding(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_LEAKY_RELU:
        ggml_vk_leaky_relu(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_MUL_MAT:
        ggml_vk_mul_mat(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_MUL_MAT_ID:
        ggml_vk_mul_mat_id(ctx, compute_ctx, src0, src1, src2, node);
        break;
    default:
        return;
    }

    ctx->tensor_ctxs[node_idx] = compute_ctx;

#ifdef GGML_VULKAN_CHECK_RESULTS
    // Force context reset on each node so that each tensor ends up in its own context
    // and can be run and compared to its CPU equivalent separately
    last_node = true;
#endif

    if (last_node) {
        ggml_vk_ctx_end(compute_ctx);
        compute_ctx->exit_tensor_idx = node_idx;
        ctx->compute_ctx.reset();
    }
}
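
// Execute the context recorded for a tensor: perform pending staging copies,
// submit the command buffer if it hasn't been submitted yet, and, at the exit
// tensor, wait on the fence and copy the results back to host memory.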
static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * tensor, int tensor_idx){
    ggml_tensor_extra_gpu * extra = nullptr;

    switch (tensor->op) {
    case GGML_OP_ADD:
    case GGML_OP_GET_ROWS:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_IM2COL:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_LEAKY_RELU:
    case GGML_OP_REPEAT:
        extra = (ggml_tensor_extra_gpu *) tensor->extra;
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(tensor)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            extra = (ggml_tensor_extra_gpu *) tensor->extra;
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
        extra = (ggml_tensor_extra_gpu *) tensor->extra;
        break;
    default:
        return false;
    }

    if (extra == nullptr) {
        return false;
    }

    VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");

#ifdef GGML_VULKAN_CHECK_RESULTS
    ggml_vk_check_results_0(tensor);
#endif

    vk_context subctx = ctx->tensor_ctxs[tensor_idx].lock();

    // Only run if ctx hasn't been submitted yet
    if (!subctx->seqs.empty()) {
        // Do staging buffer copies
        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(subctx, ctx->fence);
    }

    if (tensor_idx == subctx->exit_tensor_idx) {
        VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_compute_forward waitForFences");
        ctx->device->device.resetFences({ ctx->fence });

        // Do staging buffer copies
        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        subctx->in_memcpys.clear();
        subctx->out_memcpys.clear();
    }

    return true;
}
// Clean up after graph processing is done
static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_graph_cleanup()");
    for (auto& buffer : ctx->gc.temp_buffers) {
        ggml_vk_pool_free(ctx, buffer);
    }
    ctx->gc.temp_buffers.clear();

    for (auto& pipeline : ctx->device->pipelines) {
        if (pipeline.expired()) {
            continue;
        }

        vk_pipeline pl = pipeline.lock();
        ggml_pipeline_cleanup(pl);
    }

    ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue);
    ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue);

    for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s });
    }
    ctx->gc.semaphores.clear();

    for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
    }
    ctx->gc.tl_semaphores.clear();
    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.resetEvent(event);
    }

    ctx->staging_offset = 0;

    ctx->tensor_ctxs.clear();
    ctx->gc.contexts.clear();
}
// Clean up on backend free
static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->name << ")");
    ggml_vk_graph_cleanup(ctx);

    ggml_vk_destroy_buffer(ctx->prealloc_x);
    ggml_vk_destroy_buffer(ctx->prealloc_y);
    ggml_vk_destroy_buffer(ctx->prealloc_split_k);
    ggml_vk_destroy_buffer(ctx->staging);

    for (auto& buffer : ctx->buffer_pool) {
        ggml_vk_destroy_buffer(buffer);
    }

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;
    ctx->staging_size = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.destroyEvent(event);
    }
    ctx->gc.events.clear();

    ctx->device->device.destroyFence(ctx->fence);
}
GGML_CALL static int ggml_vk_get_device_count() {
    ggml_vk_instance_init();

    return vk_instance.device_indices.size();
}

GGML_CALL static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    vk::PhysicalDeviceProperties props;
    devices[device].getProperties(&props);

    snprintf(description, description_size, "%s", props.deviceName.data());
}
// backend interface

#define UNUSED GGML_UNUSED

// device backend
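
// The Vulkan backend does not expose host pointers into device memory, so
// buffer "addresses" are synthetic: get_base returns vk_ptr_base and tensor
// data pointers encode only an offset relative to it.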
static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT
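
// Context for a backend buffer: owns the device buffer plus a fixed pool of
// GGML_VK_MAX_NODES tensor extras that are handed out round-robin.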
struct ggml_backend_vk_buffer_context {
    vk_device_ref device;
    vk_buffer dev_buffer;
    ggml_tensor_extra_gpu * temp_tensor_extras = nullptr;
    size_t temp_tensor_extra_index = 0;
    std::string name;

    ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
        device(device),
        dev_buffer(dev_buffer),
        name(name) {
    }

    ~ggml_backend_vk_buffer_context() {
        ggml_vk_destroy_buffer(dev_buffer);
        if (temp_tensor_extras != nullptr) {
            delete[] temp_tensor_extras;
        }
    }

    ggml_tensor_extra_gpu * ggml_vk_alloc_temp_tensor_extra() {
        if (temp_tensor_extras == nullptr) {
            temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_VK_MAX_NODES];
        }

        size_t alloc_index = temp_tensor_extra_index;
        temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_VK_MAX_NODES;
        ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index];
        extra->reset();

        return extra;
    }
};
GGML_CALL static const char * ggml_backend_vk_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    return ctx->name.c_str();
}

GGML_CALL static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_vk_buffer_get_name;
}

GGML_CALL static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_free_buffer()");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    ggml_vk_destroy_buffer(ctx->dev_buffer);
    delete ctx;
}

GGML_CALL static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
    return vk_ptr_base;

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    if (tensor->view_src != nullptr) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
        GGML_ASSERT(tensor->view_src->extra != nullptr);
        tensor->extra = tensor->view_src->extra;
    } else {
        ggml_tensor_extra_gpu * extra = ctx->ggml_vk_alloc_temp_tensor_extra();
        extra->buffer_gpu = ctx->dev_buffer;
        extra->offset = (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
        tensor->extra = extra;
    }
}

GGML_CALL static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_write(buf, extra->offset + tensor->view_offs + offset, data, size);

    GGML_UNUSED(buffer);
}

GGML_CALL static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_read(buf, extra->offset + tensor->view_offs + offset, data, size);

    GGML_UNUSED(buffer);
}

GGML_CALL static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
    if (ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
        ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;

        vk_buffer src_buf = src_extra->buffer_gpu.lock();
        vk_buffer dst_buf = dst_extra->buffer_gpu.lock();

        ggml_vk_buffer_copy(dst_buf, dst_extra->offset + dst->view_offs, src_buf, src_extra->offset + src->view_offs, ggml_nbytes(src));

        return true;
    }
    return false;

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    ggml_vk_buffer_memset(ctx->dev_buffer, 0, value, buffer->size);
}

static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
    /* .get_name        = */ ggml_backend_vk_buffer_get_name,
    /* .free_buffer     = */ ggml_backend_vk_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_vk_buffer_get_base,
    /* .init_tensor     = */ ggml_backend_vk_buffer_init_tensor,
    /* .set_tensor      = */ ggml_backend_vk_buffer_set_tensor,
    /* .get_tensor      = */ ggml_backend_vk_buffer_get_tensor,
    /* .cpy_tensor      = */ ggml_backend_vk_buffer_cpy_tensor,
    /* .clear           = */ ggml_backend_vk_buffer_clear,
    /* .reset           = */ NULL,
};
// vk buffer type
GGML_CALL static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return ctx->name.c_str();
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")");
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;

    vk_buffer dev_buffer = nullptr;
    try {
        dev_buffer = ggml_vk_create_buffer_device(ctx->device, size);
    } catch (const vk::SystemError& e) {
        return nullptr;
    }

    ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->device, std::move(dev_buffer), ctx->name);

    return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
}

GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->properties.limits.minStorageBufferOffsetAlignment;
}

GGML_CALL static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->max_memory_allocation_size;
}

GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(buft);
}

GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
    ggml_vk_instance_init();

    VK_LOG_DEBUG("ggml_backend_vk_buffer_type(" << dev_num << ")");

    vk_device dev = ggml_vk_get_device(dev_num);

    return &dev->buffer_type;
}

// host buffer type

GGML_CALL static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_VK_NAME "_Host";

    UNUSED(buft);
}

GGML_CALL static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
    return GGML_VK_NAME "_Host";

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()");
    ggml_vk_host_free(vk_instance.devices[0], buffer->context);
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")");

    size += 32;  // Behave like the CPU buffer type

    void * ptr = nullptr;
    try {
        ptr = ggml_vk_host_malloc(vk_instance.devices[0], size);
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Failed to allocate pinned memory." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.get_name    = ggml_backend_vk_host_buffer_name;
    buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;

    return buffer;
}

GGML_CALL static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return vk_instance.devices[0]->properties.limits.minMemoryMapAlignment;

    UNUSED(buft);
}

// Should be changed to return device-specific host buffer type
// but that probably requires changes in llama.cpp
GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name       = */ ggml_backend_vk_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_vk_host_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .context  = */ nullptr,
    };

    // Make sure device 0 is initialized
    ggml_vk_instance_init();
    ggml_vk_get_device(0);

    return &ggml_backend_vk_buffer_type_host;
}
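
// Illustrative usage sketch (not code from this file; all calls shown are existing
// public ggml-backend API): a caller that wants pinned staging memory for faster
// host<->device transfers would allocate through this buffer type, and the fallback
// above makes a pinning failure indistinguishable from a plain CPU buffer:
//
//     ggml_backend_buffer_t staging =
//         ggml_backend_buft_alloc_buffer(ggml_backend_vk_host_buffer_type(), nbytes);
//     void * host_ptr = ggml_backend_buffer_get_base(staging); // write tensor data here
//     ggml_backend_buffer_free(staging);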

// backend

GGML_CALL static const char * ggml_backend_vk_name(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return ctx->name.c_str();
}

GGML_CALL static void ggml_backend_vk_free(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    VK_LOG_DEBUG("ggml_backend_vk_free(" << ctx->name << ")");

    ggml_vk_cleanup(ctx);

    delete ctx;
    delete backend;
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return &ctx->device->buffer_type;
}

GGML_CALL static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_set_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_write_async(transfer_ctx, buf, extra->offset + tensor->view_offs + offset, data, size, ctx->staging, ctx->staging_offset);
}

GGML_CALL static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_get_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_read_async(transfer_ctx, buf, extra->offset + tensor->view_offs + offset, data, size, ctx->staging, ctx->staging_offset);
}

GGML_CALL static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if ((dst->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
        ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;

        vk_context transfer_ctx;

        if (ctx->transfer_ctx.expired()) {
            // Initialize new transfer context
            transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
            ctx->transfer_ctx = transfer_ctx;
            ggml_vk_ctx_begin(ctx->device, transfer_ctx);
        } else {
            transfer_ctx = ctx->transfer_ctx.lock();
        }

        vk_buffer src_buf = src_extra->buffer_gpu.lock();
        vk_buffer dst_buf = dst_extra->buffer_gpu.lock();

        ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, dst_extra->offset + dst->view_offs, src_buf, src_extra->offset + src->view_offs, ggml_nbytes(src));
        return true;
    }

    return false;
}

GGML_CALL static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
    VK_LOG_DEBUG("ggml_backend_vk_synchronize()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if (ctx->transfer_ctx.expired()) {
        return;
    }

    vk_context transfer_ctx = ctx->transfer_ctx.lock();

    ggml_vk_ctx_end(transfer_ctx);

    for (auto& cpy : transfer_ctx->in_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ggml_vk_submit(transfer_ctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_backend_vk_synchronize waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    for (auto& cpy : transfer_ctx->out_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ctx->transfer_ctx.reset();
}
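
// Note on the async transfer pattern above: set/get/cpy_tensor_async only record
// work into a shared transfer context; host-side staging copies are deferred.
// Uploads (in_memcpys) run right before the command buffer is submitted, and
// downloads (out_memcpys) run after the fence signals, so device data is valid
// when the host memcpy executes. The intended call sequence would be, e.g.:
//
//     ggml_backend_vk_set_tensor_async(backend, t, data, 0, size); // record only
//     ggml_backend_vk_synchronize(backend);                        // flush + wait
//
// These entry points are currently disabled (NULL) in ggml_backend_vk_interface below.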

static bool ggml_vk_is_empty(ggml_tensor * node) {
    return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
}
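
// The ops matched above only rewrite tensor metadata (ne[] shapes and nb[] strides),
// so no GPU work ever needs to be recorded for them; ggml_backend_vk_graph_compute
// below skips such nodes entirely.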

GGML_CALL static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_vk_preallocate_buffers_graph(ctx, cgraph->nodes[i]);
    }
    ggml_vk_preallocate_buffers(ctx);

    int last_node = cgraph->n_nodes - 1;

    // If the last op in the cgraph isn't backend GPU, the command buffer doesn't get closed properly
    while (last_node > 0 && ggml_vk_is_empty(cgraph->nodes[last_node])) {
        last_node -= 1;
    }

    // Reserve tensor context space for all nodes
    ctx->tensor_ctxs.resize(cgraph->n_nodes);

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_vk_build_graph(ctx, cgraph->nodes[i], i, i == last_node);
    }
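
    // Execution is split into two passes: the loop above records every node into
    // command buffers (ggml_vk_build_graph), and the loop below submits and runs
    // them in order (ggml_vk_compute_forward). Recording with i == last_node flags
    // the final real op so its command buffer gets closed and submitted.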
    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_tensor * node = cgraph->nodes[i];

        if (ggml_vk_is_empty(node)) {
            continue;
        }

        bool ok = ggml_vk_compute_forward(ctx, node, i);
        if (!ok) {
            if (node->op == GGML_OP_UNARY) {
                std::cerr << __func__ << ": error: op not supported UNARY " << node->name << " (" << ggml_unary_op_name(static_cast<ggml_unary_op>(node->op_params[0])) << ")" << std::endl;
            } else {
                std::cerr << __func__ << ": error: op not supported " << node->name << " (" << ggml_op_name(node->op) << ")" << std::endl;
            }
        }
#ifdef GGML_VULKAN_CHECK_RESULTS
        else {
            ggml_vk_check_results_1(node);
        }
#endif
        GGML_ASSERT(ok);
    }

    ggml_vk_graph_cleanup(ctx);

    return GGML_STATUS_SUCCESS;
}

GGML_CALL static bool ggml_backend_vk_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
    // ggml_backend_vk_context * ctx = (ggml_backend_vk_context *) backend->context;

    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_TANH:
                    return ggml_is_contiguous(op->src[0]);
                default:
                    return false;
            }
            break;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_Q2_K:
                    case GGML_TYPE_Q3_K:
                    case GGML_TYPE_Q4_K:
                    case GGML_TYPE_Q5_K:
                    case GGML_TYPE_Q6_K:
                    case GGML_TYPE_IQ4_NL:
                        break;
                    default:
                        return false;
                }
                struct ggml_tensor * a;
                struct ggml_tensor * b;
                if (op->op == GGML_OP_MUL_MAT) {
                    a = op->src[0];
                    b = op->src[1];
                } else {
                    a = op->src[2];
                    b = op->src[1];
                }
                if (a->ne[3] != b->ne[3]) {
                    return false;
                }
                return true;
            } break;
        case GGML_OP_GET_ROWS:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_IQ4_NL:
                        return true;
                    default:
                        return false;
                }
            } break;
        case GGML_OP_CPY:
        case GGML_OP_DUP:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                return false;
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_type src0_type = op->src[0]->type;
                return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16;
            } break;
        case GGML_OP_ROPE:
            return ggml_is_contiguous(op->src[0]);
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_NORM:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_ADD:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_CONCAT:
        case GGML_OP_UPSCALE:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_CLAMP:
        case GGML_OP_PAD:
        case GGML_OP_CONT:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_ARGSORT:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_IM2COL:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_LEAKY_RELU:
            return true;
        default:
            return false;
    }

    UNUSED(backend);
}

GGML_CALL static bool ggml_backend_vk_offload_op(ggml_backend_t backend, const ggml_tensor * op) {
    const int min_batch_size = 32;

    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
           (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);

    UNUSED(backend);
}
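
// Offload heuristic: moving an op to the GPU only pays off once enough rows are
// processed per op to amortize the host<->device transfer. For most ops the batch
// dimension is ne[1] (e.g. a MUL_MAT over at least 32 tokens qualifies), while for
// MUL_MAT_ID it is ne[2]. GET_ROWS is excluded, presumably because the gather is
// cheap on the CPU relative to the transfer cost.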

GGML_CALL static bool ggml_backend_vk_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    if (buft->iface.get_name != ggml_backend_vk_buffer_type_name) {
        return false;
    }

    ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return buft_ctx->device == ctx->device;
}

// TODO: enable async and synchronize
static ggml_backend_i ggml_backend_vk_interface = {
    /* .get_name                = */ ggml_backend_vk_name,
    /* .free                    = */ ggml_backend_vk_free,
    /* .get_default_buffer_type = */ ggml_backend_vk_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,  // ggml_backend_vk_set_tensor_async,
    /* .get_tensor_async        = */ NULL,  // ggml_backend_vk_get_tensor_async,
    /* .cpy_tensor_async        = */ NULL,  // ggml_backend_vk_cpy_tensor_async,
    /* .synchronize             = */ NULL,  // ggml_backend_vk_synchronize,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_vk_graph_compute,
    /* .supports_op             = */ ggml_backend_vk_supports_op,
    /* .supports_buft           = */ ggml_backend_vk_supports_buft,
    /* .offload_op              = */ ggml_backend_vk_offload_op,
    /* .event_new               = */ NULL,
    /* .event_free              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
    /* .event_synchronize       = */ NULL,
};

static ggml_guid_t ggml_backend_vk_guid() {
    static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
    return &guid;
}

GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t dev_num) {
    VK_LOG_DEBUG("ggml_backend_vk_init(" << dev_num << ")");

    ggml_backend_vk_context * ctx = new ggml_backend_vk_context;
    ggml_vk_init(ctx, dev_num);

    ggml_backend_t vk_backend = new ggml_backend {
        /* .guid      = */ ggml_backend_vk_guid(),
        /* .interface = */ ggml_backend_vk_interface,
        /* .context   = */ ctx,
    };

    return vk_backend;
}
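
// Minimal usage sketch (illustrative; `ggml_ctx` and `graph` are application-side
// objects built with the regular ggml API and are not defined in this file):
//
//     ggml_backend_t backend = ggml_backend_vk_init(0); // device 0
//     ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ggml_ctx, backend);
//     ggml_backend_graph_compute(backend, graph);
//     ggml_backend_buffer_free(buf);
//     ggml_backend_free(backend);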

GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
}

GGML_CALL int ggml_backend_vk_get_device_count() {
    return ggml_vk_get_device_count();
}

GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_get_device_description(device, description, description_size);
}

GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];

    vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();

    for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
        if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
            *total = heap.size;
            *free = heap.size;
            break;
        }
    }
}
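
// Note: *free is reported as the full device-local heap size because core Vulkan
// has no memory-budget query; an accurate value would require something like the
// VK_EXT_memory_budget extension, which is not wired up here.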

// backend registry
GGML_CALL static ggml_backend_t ggml_backend_reg_vk_init(const char * params, void * user_data) {
    ggml_backend_t vk_backend = ggml_backend_vk_init((int) (intptr_t) user_data);
    return vk_backend;

    UNUSED(params);
}

extern "C" GGML_CALL int ggml_backend_vk_reg_devices();

GGML_CALL int ggml_backend_vk_reg_devices() {
    ggml_vk_instance_init();

    for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
        char name[128];
        snprintf(name, sizeof(name), "%s%zu", GGML_VK_NAME, i);
        ggml_backend_register(name, ggml_backend_reg_vk_init, ggml_backend_vk_buffer_type(i), (void *) (intptr_t) i);  // NOLINT
    }
    return vk_instance.device_indices.size();
}

// Extension availability
static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef GGML_VULKAN_VALIDATE
    // Note: like the portability helper below, this currently looks for
    // VK_KHR_portability_enumeration rather than a validation-specific extension.
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
            return true;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef __APPLE__
    // Check for portability enumeration extension for MoltenVK support
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
            return true;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

// checks

#ifdef GGML_VULKAN_CHECK_RESULTS
static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
    if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
        return;
    }
    for (int j = 0; j < level; j++) {
        std::cerr << " ";
    }
    std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << std::endl;

    done.push_back(tensor);

    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (tensor->src[i] != nullptr) {
            ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
        }
    }
}

static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16 && tensor->type != GGML_TYPE_I32) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else if (tensor->type == GGML_TYPE_I32) {
                    val = *(const int32_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}

static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name) {
    void * tensor_data = tensor->data;

    const bool is_gpu = tensor->buffer != nullptr && ggml_backend_buffer_is_vk(tensor->buffer);
    if (is_gpu) {
        const size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

        vk_buffer buffer_gpu = extra->buffer_gpu.lock();
        ggml_vk_buffer_read(buffer_gpu, extra->offset + tensor->view_offs, tensor_data, tensor_size);
    }

    std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
    std::cerr << "tensor=" << tensor << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
    if (tensor->src[0] != nullptr) {
        std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
    }
    if (tensor->src[1] != nullptr) {
        std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
    }
    std::cerr << std::endl << "Result:" << std::endl;
    ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
    std::cerr << std::endl;
    std::vector<const ggml_tensor *> done;
    ggml_vk_print_graph_origin(tensor, done);

    if (is_gpu) {
        free(tensor_data);
    }
}

void * comp_result;
size_t comp_size;
size_t comp_nb[GGML_MAX_DIMS];
size_t check_counter = 0;
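
// comp_result/comp_size/comp_nb carry the CPU reference result between the two
// check hooks: ggml_vk_check_results_0 runs before a node executes on the GPU and
// stores the CPU backend's output here, and ggml_vk_check_results_1 runs after it
// and compares the Vulkan output element-wise against this buffer.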

static void ggml_vk_check_results_0(ggml_tensor * tensor) {
    if (tensor->op == GGML_OP_TRANSPOSE) {
        return;
    }

    check_counter++;
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_0(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];

    struct ggml_init_params iparams = {
        /*.mem_size   =*/ 2ul*1024ul*1024ul*1024ul,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };

    struct ggml_context * ggml_ctx = ggml_init(iparams);

    struct ggml_tensor * src0_clone = nullptr;
    struct ggml_tensor * src1_clone = nullptr;
    struct ggml_tensor * src2_clone = nullptr;
    struct ggml_tensor * tensor_clone = nullptr;

    size_t src0_size;
    size_t src1_size;
    size_t src2_size;

    void * src0_buffer = nullptr;
    void * src1_buffer = nullptr;
    void * src2_buffer = nullptr;

    if (src0 != nullptr) {
        src0_clone = ggml_dup_tensor(ggml_ctx, src0);

        src0_size = ggml_nbytes(src0);

        src0_buffer = malloc(src0_size);
        src0_clone->data = src0_buffer;
        if (ggml_backend_buffer_is_host(src0->buffer)) {
            memcpy(src0_clone->data, src0->data, src0_size);
            memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src0->buffer)) {
            ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src0->extra;
            vk_buffer buffer_gpu = extra->buffer_gpu.lock();
            uint64_t offset = extra->offset + src0->view_offs;
            if (!ggml_is_contiguous(src0) && ggml_vk_dim01_contiguous(src0)) {
                for (int i3 = 0; i3 < src0->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src0->ne[2]; i2++) {
                        const int idx = i3*src0->ne[2] + i2;
                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src0->nb[2], ((char *)src0_clone->data + idx * src0_clone->nb[2]), src0->ne[1] * src0->nb[1]);
                    }
                }

                src0_clone->nb[0] = src0->nb[0];
                src0_clone->nb[1] = src0->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src0_clone->nb[i] = src0_clone->nb[i - 1]*src0_clone->ne[i - 1];
                }
            } else {
                if (offset + src0_size >= buffer_gpu->size) {
                    src0_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(buffer_gpu, offset, src0_clone->data, src0_size);
                memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ABORT("fatal error");
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(src0, "src0");
        }
    }

    if (src1 != nullptr) {
        src1_clone = ggml_dup_tensor(ggml_ctx, src1);

        src1_size = ggml_nbytes(src1);

        src1_buffer = malloc(src1_size);
        src1_clone->data = src1_buffer;
        if (ggml_backend_buffer_is_host(src1->buffer)) {
            memcpy(src1_clone->data, src1->data, src1_size);
            memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src1->buffer)) {
            ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src1->extra;
            vk_buffer buffer_gpu = extra->buffer_gpu.lock();
            uint64_t offset = extra->offset + src1->view_offs;
            if (!ggml_is_contiguous(src1) && ggml_vk_dim01_contiguous(src1)) {
                for (int i3 = 0; i3 < src1->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src1->ne[2]; i2++) {
                        const int idx = i3*src1->ne[2] + i2;
                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src1->nb[2], ((char *)src1_clone->data + idx * src1_clone->nb[2]), src1->ne[1] * src1->nb[1]);
                    }
                }

                src1_clone->nb[0] = src1->nb[0];
                src1_clone->nb[1] = src1->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src1_clone->nb[i] = src1_clone->nb[i - 1]*src1_clone->ne[i - 1];
                }
            } else {
                if (offset + src1_size >= buffer_gpu->size) {
                    src1_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(buffer_gpu, offset, src1_clone->data, src1_size);
                memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ABORT("fatal error");
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(src1, "src1");
        }
    }

    if (src2 != nullptr) {
        src2_clone = ggml_dup_tensor(ggml_ctx, src2);

        src2_size = ggml_nbytes(src2);

        src2_buffer = malloc(src2_size);
        src2_clone->data = src2_buffer;
        if (ggml_backend_buffer_is_host(src2->buffer)) {
            memcpy(src2_clone->data, src2->data, src2_size);
            memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src2->buffer)) {
            ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src2->extra;
            vk_buffer buffer_gpu = extra->buffer_gpu.lock();
            uint64_t offset = extra->offset + src2->view_offs;
            if (!ggml_is_contiguous(src2) && ggml_vk_dim01_contiguous(src2)) {
                for (int i3 = 0; i3 < src2->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src2->ne[2]; i2++) {
                        const int idx = i3*src2->ne[2] + i2;
                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src2->nb[2], ((char *)src2_clone->data + idx * src2_clone->nb[2]), src2->ne[1] * src2->nb[1]);
                    }
                }

                src2_clone->nb[0] = src2->nb[0];
                src2_clone->nb[1] = src2->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src2_clone->nb[i] = src2_clone->nb[i - 1]*src2_clone->ne[i - 1];
                }
            } else {
                if (offset + src2_size >= buffer_gpu->size) {
                    src2_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(buffer_gpu, offset, src2_clone->data, src2_size);
                memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ABORT("fatal error");
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(src2, "src2");
        }
    }

    if (tensor->op == GGML_OP_MUL_MAT) {
        tensor_clone = ggml_mul_mat(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
        tensor_clone = ggml_mul_mat_id(ggml_ctx, src0_clone, src1_clone, src2_clone);
    } else if (tensor->op == GGML_OP_MUL) {
        tensor_clone = ggml_mul(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_DIV) {
        tensor_clone = ggml_div(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_CONCAT) {
        tensor_clone = ggml_concat(ggml_ctx, src0_clone, src1_clone, *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_UPSCALE) {
        tensor_clone = ggml_upscale_ext(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_SCALE) {
        tensor_clone = ggml_scale(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0]);
    } else if (tensor->op == GGML_OP_SQR) {
        tensor_clone = ggml_sqr(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_CLAMP) {
        tensor_clone = ggml_clamp(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
    } else if (tensor->op == GGML_OP_PAD) {
        tensor_clone = ggml_pad(ggml_ctx, src0_clone, tensor->ne[0] - src0_clone->ne[0], tensor->ne[1] - src0_clone->ne[1], tensor->ne[2] - src0_clone->ne[2], tensor->ne[3] - src0_clone->ne[3]);
    } else if (tensor->op == GGML_OP_ADD) {
        tensor_clone = ggml_add(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_NORM) {
        tensor_clone = ggml_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
    } else if (tensor->op == GGML_OP_GROUP_NORM) {
        tensor_clone = ggml_group_norm(ggml_ctx, src0_clone, *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_RMS_NORM) {
        tensor_clone = ggml_rms_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
    } else if (tensor->op == GGML_OP_SOFT_MAX) {
        if (src1 != nullptr) {
            tensor_clone = ggml_soft_max_ext(ggml_ctx, src0_clone, src1_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
        } else {
            tensor_clone = ggml_soft_max(ggml_ctx, src0_clone);
        }
    } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
        tensor_clone = ggml_diag_mask_inf(ggml_ctx, src0_clone, *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_ROPE) {
        const int n_dims          = ((int32_t *) tensor->op_params)[1];
        const int mode            = ((int32_t *) tensor->op_params)[2];
        //const int n_ctx_ggml    = ((int32_t *) tensor->op_params)[3];
        const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
        const float freq_base     = ((float *) tensor->op_params)[5];
        const float freq_scale    = ((float *) tensor->op_params)[6];
        const float ext_factor    = ((float *) tensor->op_params)[7];
        const float attn_factor   = ((float *) tensor->op_params)[8];
        const float beta_fast     = ((float *) tensor->op_params)[9];
        const float beta_slow     = ((float *) tensor->op_params)[10];
        tensor_clone = ggml_rope_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
    } else if (tensor->op == GGML_OP_UNARY) {
        switch (ggml_get_unary_op(tensor)) {
            case GGML_UNARY_OP_SILU:
                tensor_clone = ggml_silu(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_GELU:
                tensor_clone = ggml_gelu(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_GELU_QUICK:
                tensor_clone = ggml_gelu_quick(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_RELU:
                tensor_clone = ggml_relu(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_TANH:
                tensor_clone = ggml_tanh(ggml_ctx, src0_clone);
                break;
            default:
                std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
                GGML_ABORT("fatal error");
        }
    } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
        if (src1 == nullptr) {
            tensor_clone = ggml_dup(ggml_ctx, src0_clone);
            tensor_clone->type = tensor->type;
        } else {
            tensor_clone = ggml_cpy(ggml_ctx, src0_clone, src1_clone);
        }
    } else if (tensor->op == GGML_OP_CONT) {
        tensor_clone = ggml_cont_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_RESHAPE) {
        tensor_clone = ggml_reshape_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_VIEW) {
        tensor_clone = ggml_view_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
    } else if (tensor->op == GGML_OP_PERMUTE) {
        int32_t * params = (int32_t *)tensor->op_params;
        tensor_clone = ggml_permute(ggml_ctx, src0_clone, params[0], params[1], params[2], params[3]);
    } else if (tensor->op == GGML_OP_TRANSPOSE) {
        tensor_clone = ggml_transpose(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_GET_ROWS) {
        tensor_clone = ggml_get_rows(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_ARGSORT) {
        tensor_clone = ggml_argsort(ggml_ctx, src0_clone, (ggml_sort_order) *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_SUM_ROWS) {
        tensor_clone = ggml_sum_rows(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_IM2COL) {
        const int32_t s0 = tensor->op_params[0];
        const int32_t s1 = tensor->op_params[1];
        const int32_t p0 = tensor->op_params[2];
        const int32_t p1 = tensor->op_params[3];
        const int32_t d0 = tensor->op_params[4];
        const int32_t d1 = tensor->op_params[5];

        const bool is_2D = tensor->op_params[6] == 1;

        tensor_clone = ggml_im2col(ggml_ctx, src0_clone, src1_clone, s0, s1, p0, p1, d0, d1, is_2D, tensor->type);
    } else if (tensor->op == GGML_OP_TIMESTEP_EMBEDDING) {
        const int32_t dim        = tensor->op_params[0];
        const int32_t max_period = tensor->op_params[1];
        tensor_clone = ggml_timestep_embedding(ggml_ctx, src0_clone, dim, max_period);
    } else if (tensor->op == GGML_OP_LEAKY_RELU) {
        const float * op_params = (const float *)tensor->op_params;
        tensor_clone = ggml_leaky_relu(ggml_ctx, src0_clone, op_params[0], false);
    } else {
        std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
        GGML_ABORT("fatal error");
    }

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_clone);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 8);

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        ggml_vk_print_tensor(tensor_clone, "tensor_clone");
    }

    comp_size = ggml_nbytes(tensor_clone);

    comp_result = malloc(comp_size);
    memcpy(comp_result, tensor_clone->data, comp_size);
    memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);

    if (src0 != nullptr) {
        free(src0_buffer);
    }
    if (src1 != nullptr) {
        free(src1_buffer);
    }
    if (src2 != nullptr) {
        free(src2_buffer);
    }

    ggml_free(ggml_ctx);

    VK_LOG_DEBUG("END ggml_vk_check_results_0(" << tensor->name << ")");
}

static void ggml_vk_check_results_1(ggml_tensor * tensor) {
    if (tensor->op == GGML_OP_TRANSPOSE) {
        return;
    }
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_1(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];

    void * tensor_data = tensor->data;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

        vk_buffer buffer_gpu = extra->buffer_gpu.lock();
        if (extra->offset + tensor->view_offs + tensor_size >= buffer_gpu->size) {
            tensor_size = buffer_gpu->size - (extra->offset + tensor->view_offs);
        }

        ggml_vk_buffer_read(buffer_gpu, extra->offset + tensor->view_offs, tensor_data, tensor_size);
    }

    float first_error_result = -1.0f;
    float first_error_correct = -1.0f;
    std::array<int, 4> first_error = { -1, -1, -1, -1 };
    double avg_err = 0.0;
    size_t counter = 0;

    for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
        for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
            for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
                for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
                    const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
                    float correct = 0.0f;
                    float result  = 0.0f;

                    if (buffer_size_fit) {
                        if (tensor->type == GGML_TYPE_F32) {
                            correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result  = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else if (tensor->type == GGML_TYPE_F16) {
                            correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                            result  = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                        } else if (tensor->type == GGML_TYPE_I32) {
                            correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result  = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else {
                            std::cerr << "Results check not implemented for type " << ggml_type_name(tensor->type) << std::endl;
                        }
                    } else {
                        std::cerr << "Missing debug code for type " << ggml_type_name(tensor->type) << std::endl;
                        GGML_ABORT("fatal error");
                    }

                    if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
                        std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
                        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
                        if (src0 != nullptr) {
                            std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
                        }
                        if (src1 != nullptr) {
                            std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
                        }
                        if (src2 != nullptr) {
                            std::cerr << "src2=" << src2 << " src2->name=" << src2->name << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
                        }
                        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
                        std::cerr << std::endl << "Result:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
                        std::cerr << std::endl << "Correct:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
                        std::cerr << std::endl;
                        std::vector<const ggml_tensor *> done;
                        ggml_vk_print_graph_origin(tensor, done);
                        GGML_ABORT("fatal error");
                    }

                    if (first_error[0] == -1 && std::fabs(correct - result) > 0.1f) {
                        first_error[0] = i0;
                        first_error[1] = i1;
                        first_error[2] = i2;
                        first_error[3] = i3;
                        first_error_result = result;
                        first_error_correct = correct;
                    }

                    // Special case: skip infinite values to avoid a NaN result in avg_err.
                    // NaN also appears in results; if both values are NaN the error counts as 0.
                    if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
                        avg_err += std::fabs(correct - result);
                    }
                    counter++;
                }
            }
        }
    }

    avg_err /= counter;

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
    }

    if (avg_err > 0.05 || std::isnan(avg_err)) {
        std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
        GGML_ABORT("fatal error");
    } else {
        std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " avg_err=" << avg_err << std::endl;
    }

    free(comp_result);
    comp_result = nullptr;
    comp_size = 0;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        free(tensor_data);
    }

    VK_LOG_DEBUG("END ggml_vk_check_results_1(" << tensor->name << ")");
}
#endif