ggml-vulkan.cpp (733 KB, 14,308 lines)
43091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211462214623146241462514626146271462814629146301463114632146331463414635146361463714638146391464014641146421464314644146451464614647146481464914650146511465214653146541465514656146571465814659146601466114662146631466414665146661466714668146691467014671146721467314674146751467614677146781467914680146811468214683146841468514686146871468814689146901469114692146931469414695146961469714698146991470014701147021470314704147051470614707147081470914710147111471214713147141471514716147171471814719147201472114722147231472414725147261472714728147291473014731147321473314734147351473614737147381473914740147411474214743147441474514746147471474814749147501475114752147531475414755147561475714758147591476014761147621476314764147651476614767147681476914770147711477214773147741477514776147771477814779147801478114782147831478414785147861478714788147891479014791147921479314794147951479614797147981479914800148011480214803148041480514806148071480814809148101481114812
#include "ggml-vulkan.h"
#include <vulkan/vulkan_core.h>
#if defined(GGML_VULKAN_RUN_TESTS) || defined(GGML_VULKAN_CHECK_RESULTS)
#include <chrono>
#include "ggml-cpu.h"
#endif

// See https://github.com/KhronosGroup/Vulkan-Hpp?tab=readme-ov-file#extensions--per-device-function-pointers-
#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1

// We use VULKAN_HPP_DEFAULT_DISPATCHER, but not VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE,
// to avoid conflicts with applications or other libraries that might use it.
#if VK_HEADER_VERSION >= 301
namespace vk::detail { class DispatchLoaderDynamic; }
using vk::detail::DispatchLoaderDynamic;
#else
namespace vk { class DispatchLoaderDynamic; }
using vk::DispatchLoaderDynamic;
#endif
DispatchLoaderDynamic & ggml_vk_default_dispatcher();
#define VULKAN_HPP_DEFAULT_DISPATCHER ggml_vk_default_dispatcher()

#include <vulkan/vulkan.hpp>

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <tuple>
#include <vector>
#include <sstream>
#include <utility>
#include <memory>
#include <limits>
#include <map>
#include <unordered_map>
#include <mutex>
#include <future>
#include <thread>

#if defined(_MSC_VER)
#  define NOMINMAX 1
#  include <windows.h>
#  define YIELD() YieldProcessor()
#elif defined(__clang__) || defined(__GNUC__)
#  if defined(__x86_64__) || defined(__i386__)
#    include <immintrin.h>
#    define YIELD() _mm_pause()
#  elif defined(__arm__) || defined(__aarch64__)
#    if defined(__clang__)
#      include <arm_acle.h>
#      define YIELD() __yield()
#    else
#      define YIELD() asm volatile("yield")
#    endif
#  endif
#endif

#if !defined(YIELD)
#define YIELD()
#endif

#include "ggml-impl.h"
#include "ggml-backend-impl.h"
#include "ggml-vulkan-shaders.hpp"

// remove this once it's more widely available in the SDK
#if !defined(VK_KHR_shader_bfloat16)

#define VK_KHR_shader_bfloat16 1
#define VK_KHR_SHADER_BFLOAT16_SPEC_VERSION 1
#define VK_KHR_SHADER_BFLOAT16_EXTENSION_NAME "VK_KHR_shader_bfloat16"
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR ((VkStructureType)1000141000)
#define VK_COMPONENT_TYPE_BFLOAT16_KHR ((VkComponentTypeKHR)1000141000)

typedef struct VkPhysicalDeviceShaderBfloat16FeaturesKHR {
    VkStructureType sType;
    void*           pNext;
    VkBool32        shaderBFloat16Type;
    VkBool32        shaderBFloat16DotProduct;
    VkBool32        shaderBFloat16CooperativeMatrix;
} VkPhysicalDeviceShaderBfloat16FeaturesKHR;

#endif

#define ROUNDUP_POW2(M, N) (((M) + (N) - 1) & ~((N) - 1))
#define CEIL_DIV(M, N) (((M) + (N) - 1) / (N))
static bool is_pow2(uint32_t x) { return x > 1 && (x & (x-1)) == 0; }
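// Worked examples (illustrative): ROUNDUP_POW2(13, 8) == 16 and
// CEIL_DIV(13, 8) == 2. Note that is_pow2 returns false for x <= 1.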
#define VK_VENDOR_ID_AMD 0x1002
#define VK_VENDOR_ID_APPLE 0x106b
#define VK_VENDOR_ID_INTEL 0x8086
#define VK_VENDOR_ID_NVIDIA 0x10de

#define VK_DEVICE_DESCRIPTOR_POOL_SIZE 256

#define GGML_VK_MAX_NODES 8192

#define VK_CHECK(err, msg)                                          \
    do {                                                            \
        vk::Result err_ = (err);                                    \
        if (err_ != vk::Result::eSuccess) {                         \
            fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n",  \
                #err, to_string(err_).c_str(), __FILE__, __LINE__); \
            exit(1);                                                \
        }                                                           \
    } while (0)
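// Note: the msg argument is currently unused; the macro prints the stringized
// expression (#err) and the vk::Result instead. The expression must evaluate
// to a vk::Result, e.g.:
//     VK_CHECK(some_call_returning_vk_result(), "context for the error");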
#ifdef GGML_VULKAN_DEBUG
#define VK_LOG_DEBUG(msg) std::cerr << msg << std::endl
#else
#define VK_LOG_DEBUG(msg) ((void) 0)
#endif // GGML_VULKAN_DEBUG

struct ggml_backend_vk_context;

#define MAX_PARAMETER_COUNT 12
// Max number of adds that can be fused without exceeding MAX_PARAMETER_COUNT.
#define MAX_FUSED_ADDS (MAX_PARAMETER_COUNT - 3)
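// A chain of n fused adds binds n+1 source buffers plus the destination, and
// (presumably) one further binding is reserved, hence the -3: with
// MAX_PARAMETER_COUNT == 12, up to 9 adds can be fused.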
struct vk_pipeline_struct {
    std::string name;
    vk::ShaderModule shader_module;
    vk::PipelineLayout layout;
    vk::Pipeline pipeline;
    uint32_t push_constant_size;
    uint32_t parameter_count;
    std::array<uint32_t, 3> wg_denoms;
    uint32_t align;
    // true if fields have been set by ggml_vk_create_pipeline
    bool initialized {};
    // set to true to request that the pipeline be compiled after the dryrun
    bool needed {};
    // set to true when the shader has been compiled
    bool compiled {};
    // number of registers used, extracted from pipeline executable properties
    uint32_t register_count {};
};

typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;
static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);

struct vk_matmul_pipeline_struct {
    vk_pipeline l, m, s;
    vk_pipeline a_l, a_m, a_s;

    // Returns true when all unaligned pipelines are null.
    // We only check the unaligned variants, since at least one unaligned
    // pipeline must exist while the aligned pipelines are optional.
    bool is_empty() const {
        return l == nullptr && m == nullptr && s == nullptr;
    }
};

typedef std::shared_ptr<vk_matmul_pipeline_struct> vk_matmul_pipeline;

struct vk_matmul_pipeline2 {
    vk_matmul_pipeline2() {
        f16acc = std::make_shared<vk_matmul_pipeline_struct>();
        f32acc = std::make_shared<vk_matmul_pipeline_struct>();
    }
    vk_matmul_pipeline f32acc;
    vk_matmul_pipeline f16acc;
};
struct vk_device_struct;
typedef std::shared_ptr<vk_device_struct> vk_device;
typedef std::weak_ptr<vk_device_struct> vk_device_ref;

struct vk_buffer_struct;
typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;

struct ggml_backend_vk_buffer_type_context {
    std::string name;
    vk_device device;
};

struct vk_queue;

// Stores command pool/buffers. There's an instance of this
// for each (context,queue) pair and for each (device,queue) pair.
struct vk_command_pool {
    void init(vk_device& device, vk_queue *q_);
    void destroy(vk::Device& device);

    vk::CommandPool pool;
    uint32_t cmd_buffer_idx;
    std::vector<vk::CommandBuffer> cmd_buffers;

    vk_queue *q;
};

// Prevent simultaneous submissions to the same queue.
// This could be per vk_queue if we stopped having two vk_queue structures
// sharing the same vk::Queue.
static std::mutex queue_mutex;

struct vk_queue {
    uint32_t queue_family_index;
    vk::Queue queue;

    vk_command_pool cmd_pool;

    vk::PipelineStageFlags stage_flags;

    bool transfer_only;

    // copy everything except the cmd_pool
    void copyFrom(vk_queue &other) {
        queue_family_index = other.queue_family_index;
        queue = other.queue;
        stage_flags = other.stage_flags;
        transfer_only = other.transfer_only;
    }
};
static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft);
static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft);
static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft);
static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor);

static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_vk_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_vk_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_vk_buffer_type_get_alignment,
    /* .get_max_size   = */ ggml_backend_vk_buffer_type_get_max_size,
    /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
class vk_memory_logger;
#endif
class vk_perf_logger;
static void ggml_vk_destroy_buffer(vk_buffer& buf);

static constexpr uint32_t mul_mat_vec_max_cols = 8;
static constexpr uint32_t p021_max_gqa_ratio = 8;

enum vk_device_architecture {
    OTHER,
    AMD_GCN,
    AMD_RDNA1,
    AMD_RDNA2,
    AMD_RDNA3,
    INTEL_XE2,
    NVIDIA_PRE_TURING,
};
static vk_device_architecture get_device_architecture(const vk::PhysicalDevice& device) {
    vk::PhysicalDeviceProperties props = device.getProperties();

    if (props.vendorID == VK_VENDOR_ID_AMD) {
        const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

        bool amd_shader_core_properties = false;
        bool integer_dot_product = false;
        bool subgroup_size_control = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_AMD_shader_core_properties", properties.extensionName) == 0) {
                amd_shader_core_properties = true;
            } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0) {
                integer_dot_product = true;
            } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
                subgroup_size_control = true;
            }
        }

        if (!amd_shader_core_properties || !integer_dot_product || !subgroup_size_control) {
            return vk_device_architecture::OTHER;
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceShaderCorePropertiesAMD shader_core_props_amd;
        vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR integer_dot_props;
        vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;

        props2.pNext = &shader_core_props_amd;
        shader_core_props_amd.pNext = &integer_dot_props;
        integer_dot_props.pNext = &subgroup_size_control_props;

        device.getProperties2(&props2);

        if (subgroup_size_control_props.maxSubgroupSize == 64 && subgroup_size_control_props.minSubgroupSize == 64) {
            return vk_device_architecture::AMD_GCN;
        }
        if (subgroup_size_control_props.maxSubgroupSize == 64 && subgroup_size_control_props.minSubgroupSize == 32) {
            // RDNA
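            // RDNA1 SIMDs track 20 wavefronts in flight, while RDNA2/RDNA3
            // track 16 (per AMD's RDNA whitepapers), so wavefrontsPerSimd == 20
            // singles out RDNA1.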
            if (shader_core_props_amd.wavefrontsPerSimd == 20) {
                return vk_device_architecture::AMD_RDNA1;
            }
            if (integer_dot_props.integerDotProduct4x8BitPackedMixedSignednessAccelerated) {
                return vk_device_architecture::AMD_RDNA3;
            }
            return vk_device_architecture::AMD_RDNA2;
        }
    } else if (props.vendorID == VK_VENDOR_ID_INTEL) {
        const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

        bool subgroup_size_control = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
                subgroup_size_control = true;
            }
        }

        if (!subgroup_size_control) {
            return vk_device_architecture::OTHER;
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;

        props2.pNext = &subgroup_size_control_props;
        device.getProperties2(&props2);

        if (subgroup_size_control_props.minSubgroupSize == 16) {
            // The Xe2 architecture uses SIMD16, while the previous Xe and Gen architectures use SIMD8.
            // The minimum subgroup size matches the SIMD width, so we distinguish the architecture by checking this value.
            // https://www.intel.com/content/www/us/en/content-details/824434/2024-intel-tech-tour-xe2-and-lunar-lake-s-gpu.html
            // https://www.intel.com/content/www/us/en/docs/oneapi/optimization-guide-gpu/2025-0/intel-xe-gpu-architecture.html
            return vk_device_architecture::INTEL_XE2;
        }
    } else if (props.vendorID == VK_VENDOR_ID_NVIDIA) {
        const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

        bool cooperative_matrix = false;

        // Detect "pre-Turing" based on the lack of coopmat support.
        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0) {
                cooperative_matrix = true;
                break;
            }
        }

        if (!cooperative_matrix) {
            return vk_device_architecture::NVIDIA_PRE_TURING;
        }
    }
    return vk_device_architecture::OTHER;
}
enum vk_conv_shapes {
    CONV_SHAPE_128x128,
    CONV_SHAPE_64x32,
    CONV_SHAPE_32x256,
    CONV_SHAPE_COUNT,
};

enum dmmv_wg_sizes {
    DMMV_WG_SIZE_SUBGROUP,
    DMMV_WG_SIZE_LARGE,
    DMMV_WG_SIZE_COUNT,
};

enum FaCodePath {
    FA_SCALAR,
    FA_COOPMAT1,
    FA_COOPMAT2,
};
struct vk_fa_pipeline_state {
    vk_fa_pipeline_state(uint32_t HSK, uint32_t HSV, bool small_rows, FaCodePath path, bool aligned, bool f32acc)
        : HSK(HSK), HSV(HSV), small_rows(small_rows), path(path), aligned(aligned), f32acc(f32acc) {}

    uint32_t HSK, HSV;
    bool small_rows;
    FaCodePath path;
    bool aligned;
    bool f32acc;
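    // Lexicographic ordering via std::tie, so the state can serve as the key
    // of the std::map holding the flash-attention pipelines (see
    // pipeline_flash_attn_f32_f16 below).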
    bool operator<(const vk_fa_pipeline_state &b) const {
        return std::tie(HSK, HSV, small_rows, path, aligned, f32acc) <
               std::tie(b.HSK, b.HSV, b.small_rows, b.path, b.aligned, b.f32acc);
    }
};
enum shader_reduction_mode {
    SHADER_REDUCTION_MODE_SHMEM,
    SHADER_REDUCTION_MODE_HYBRID,
    SHADER_REDUCTION_MODE_SUBGROUP,
    SHADER_REDUCTION_MODE_COUNT,
};

static constexpr uint32_t num_argsort_pipelines = 11;
static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines-1);
static constexpr uint32_t num_topk_moe_pipelines = 10;

static constexpr std::initializer_list<ggml_op> topk_moe_early_softmax_norm{ GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT,
                                                                             GGML_OP_VIEW, GGML_OP_GET_ROWS, GGML_OP_RESHAPE,
                                                                             GGML_OP_SUM_ROWS, GGML_OP_CLAMP, GGML_OP_DIV,
                                                                             GGML_OP_RESHAPE };
static constexpr std::initializer_list<ggml_op> topk_moe_early_softmax     { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT,
                                                                             GGML_OP_VIEW, GGML_OP_GET_ROWS };
static constexpr std::initializer_list<ggml_op> topk_moe_late_softmax      { GGML_OP_ARGSORT, GGML_OP_VIEW,
                                                                             GGML_OP_GET_ROWS, GGML_OP_RESHAPE,
                                                                             GGML_OP_SOFT_MAX, GGML_OP_RESHAPE };
//node #978 ( SOFT_MAX): ffn_moe_probs-15     ( 0K) [Vulka ] use=2: ffn_moe_logits-15    ( 0K) [Vulka ]
//node #979 (  RESHAPE): ffn_moe_probs-15 (re ( 0K) [Vulka ] use=1: ffn_moe_probs-15     ( 0K) [Vulka ]
//node #980 (  ARGSORT): ffn_moe_argsort-15   ( 0K) [Vulka ] use=1: ffn_moe_probs-15     ( 0K) [Vulka ]
//node #981 (     VIEW): ffn_moe_topk-15      ( 0K) [Vulka ] use=4: ffn_moe_argsort-15   ( 0K) [Vulka ]
//node #982 ( GET_ROWS): ffn_moe_weights-15   ( 0K) [Vulka ] use=1: ffn_moe_probs-15 (re ( 0K) [Vulka ] ffn_moe_topk-15      ( 0K) [Vulka ]
//node #983 (  RESHAPE): ffn_moe_weights-15 ( ( 0K) [Vulka ] use=2: ffn_moe_weights-15   ( 0K) [Vulka ]
//node #984 ( SUM_ROWS): ffn_moe_weights_sum- ( 0K) [Vulka ] use=1: ffn_moe_weights-15 ( ( 0K) [Vulka ]
//node #985 (    CLAMP): ffn_moe_weights_sum_ ( 0K) [Vulka ] use=1: ffn_moe_weights_sum- ( 0K) [Vulka ]
//node #986 (      DIV): ffn_moe_weights_norm ( 0K) [Vulka ] use=1: ffn_moe_weights-15 ( ( 0K) [Vulka ] ffn_moe_weights_sum_ ( 0K) [Vulka ]
//node #987 (  RESHAPE): ffn_moe_weights_norm ( 0K) [Vulka ] use=1: ffn_moe_weights_norm ( 0K) [Vulka ]
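// Each edge below is {node_offset, src_index, parent_node_offset}: within the
// fused run, node[base + node_offset]->src[src_index] must point at
// node[base + parent_node_offset] for the pattern to match.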
static constexpr std::initializer_list<std::array<int, 3>> topk_moe_early_softmax_norm_edges {
    { 1, 0, 0 }, // reshape->src[0] == softmax
    { 2, 0, 0 }, // argsort->src[0] == softmax
    { 3, 0, 2 }, // view->src[0] == argsort
    { 4, 0, 1 }, // get_rows->src[0] == reshape
    { 4, 1, 3 }, // get_rows->src[1] == view
    { 5, 0, 4 }, // reshape->src[0] == get_rows
    { 6, 0, 5 }, // sum_rows->src[0] == reshape
    { 7, 0, 6 }, // clamp->src[0] == sum_rows
    { 8, 0, 5 }, // div->src[0] == reshape
    { 8, 1, 7 }, // div->src[1] == clamp
    { 9, 0, 8 }, // reshape->src[0] == div
};
// same as early_softmax_norm but ending after the get_rows
static constexpr std::initializer_list<std::array<int, 3>> topk_moe_early_softmax_edges {
    { 1, 0, 0 }, // reshape->src[0] == softmax
    { 2, 0, 0 }, // argsort->src[0] == softmax
    { 3, 0, 2 }, // view->src[0] == argsort
    { 4, 0, 1 }, // get_rows->src[0] == reshape
    { 4, 1, 3 }, // get_rows->src[1] == view
};

//node #652 (  ARGSORT): ffn_moe_argsort-11   ( 0K) [Vulka ] use=1: ffn_moe_probs-11     ( 0K) [Vulka ]
//node #653 (     VIEW): ffn_moe_topk-11      ( 0K) [Vulka ] use=7: ffn_moe_argsort-11   ( 0K) [Vulka ]
//node #654 ( GET_ROWS): ffn_moe_weights-11   ( 0K) [Vulka ] use=1: ffn_moe_probs-11 (re ( 0K) [Vulka ] ffn_moe_topk-11      ( 0K) [Vulka ]
//node #655 (  RESHAPE): ffn_moe_weights-11 ( ( 0K) [Vulka ] use=1: ffn_moe_weights-11   ( 0K) [Vulka ]
//node #656 ( SOFT_MAX): node_656             ( 0K) [Vulka ] use=1: ffn_moe_weights-11 ( ( 0K) [Vulka ]
//node #657 (  RESHAPE): ffn_moe_weights_soft ( 0K) [Vulka ] use=1: node_656             ( 0K) [Vulka ]
static constexpr std::initializer_list<std::array<int, 3>> topk_moe_late_softmax_edges {
    { 1, 0, 0 }, // view->src[0] == argsort
    { 2, 1, 1 }, // get_rows->src[1] == view
    { 3, 0, 2 }, // reshape->src[0] == get_rows
    { 4, 0, 3 }, // soft_max->src[0] == reshape
    { 5, 0, 4 }, // reshape->src[0] == soft_max
};
enum topk_moe_mode {
    TOPK_MOE_EARLY_SOFTMAX,
    TOPK_MOE_EARLY_SOFTMAX_NORM,
    TOPK_MOE_LATE_SOFTMAX,
    TOPK_MOE_COUNT,
};

static topk_moe_mode ggml_vk_num_additional_ops_to_topk_moe_mode(uint32_t num) {
    topk_moe_mode mode = num == topk_moe_early_softmax_norm.size() - 1 ? TOPK_MOE_EARLY_SOFTMAX_NORM :
                         num == topk_moe_early_softmax.size() - 1      ? TOPK_MOE_EARLY_SOFTMAX :
                                                                         TOPK_MOE_LATE_SOFTMAX;
    return mode;
}
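// With the op lists above this maps num == 9 (the 10-op variant) to
// TOPK_MOE_EARLY_SOFTMAX_NORM, num == 4 (the 5-op variant) to
// TOPK_MOE_EARLY_SOFTMAX, and anything else (the 6-op variant, num == 5) to
// TOPK_MOE_LATE_SOFTMAX.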
static constexpr std::initializer_list<std::array<int, 3>> rope_view_set_rows_edges {
    { 1, 0, 0 }, // view->src[0] == rope
    { 2, 0, 1 }, // set_rows->src[0] == view
};
struct vk_device_struct {
    std::recursive_mutex mutex;

    vk::PhysicalDevice physical_device;
    vk::PhysicalDeviceProperties properties;
    std::string name;
    uint64_t max_memory_allocation_size;
    uint64_t max_buffer_size;
    uint64_t suballocation_block_size;
    bool fp16;
    bool bf16;
    bool pipeline_robustness;
    vk::Device device;
    uint32_t vendor_id;
    vk::DriverId driver_id;
    vk_device_architecture architecture;
    vk_queue compute_queue;
    vk_queue transfer_queue;
    bool single_queue;
    uint32_t subgroup_size;
    uint32_t shader_core_count;
    bool uma;
    bool prefer_host_memory;
    bool float_controls_rte_fp16;
    bool subgroup_arithmetic;
    bool subgroup_shuffle;
    bool subgroup_ballot;
    bool subgroup_clustered;
    bool multi_add;
    bool shader_int64;
    bool buffer_device_address;

    bool add_rms_fusion;
    uint32_t partials_binding_alignment;

    bool integer_dot_product;
    // 0: default, 1: force mmvq, -1: disable mmvq
    int32_t mmvq_mode;

    bool subgroup_size_control;
    uint32_t subgroup_min_size;
    uint32_t subgroup_max_size;
    bool subgroup_require_full_support;

    bool coopmat_support;
    bool coopmat_acc_f32_support {};
    bool coopmat_acc_f16_support {};
    bool coopmat_bf16_support {};
    bool coopmat_support_16x16x16_f16acc {};
    bool coopmat_support_16x16x16_f32acc {};
    bool coopmat1_fa_support {};
    uint32_t coopmat_m;
    uint32_t coopmat_n;
    uint32_t coopmat_k;

    bool coopmat_int_support;
    uint32_t coopmat_int_m;
    uint32_t coopmat_int_n;
    uint32_t coopmat_int_k;

    bool coopmat2;

    bool pipeline_executable_properties_support {};

    size_t idx;

    bool mul_mat_l[GGML_TYPE_COUNT];
    bool mul_mat_m[GGML_TYPE_COUNT];
    bool mul_mat_s[GGML_TYPE_COUNT];
    bool mul_mat_id_l[GGML_TYPE_COUNT];
    bool mul_mat_id_m[GGML_TYPE_COUNT];
    bool mul_mat_id_s[GGML_TYPE_COUNT];

    // set to true to indicate that some shaders need to be compiled after the dryrun
    bool need_compiles {};

    vk::DescriptorSetLayout dsl;

    vk_matmul_pipeline pipeline_matmul_f32 {};
    vk_matmul_pipeline pipeline_matmul_f32_f16 {};
    vk_matmul_pipeline pipeline_matmul_bf16 {};
    vk_matmul_pipeline2 pipeline_matmul_f16;
    vk_matmul_pipeline2 pipeline_matmul_f16_f32;

    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT];
    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_COUNT];
    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_COUNT];

    vk_matmul_pipeline pipeline_matmul_id_f32 {};
    vk_matmul_pipeline pipeline_matmul_id_bf16 {};
    vk_matmul_pipeline2 pipeline_matmul_id_f16;
    vk_matmul_pipeline2 pipeline_matmul_id_f16_f32;

    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT];
    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_COUNT];

    vk_pipeline pipeline_matmul_split_k_reduce;
    vk_pipeline pipeline_quantize_q8_1;
    vk_pipeline pipeline_quantize_q8_1_x4;

    vk_pipeline pipeline_dequant[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[DMMV_WG_SIZE_COUNT][GGML_TYPE_COUNT][mul_mat_vec_max_cols];
    vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[DMMV_WG_SIZE_COUNT][GGML_TYPE_COUNT][mul_mat_vec_max_cols];
    vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_q8_1_f32[DMMV_WG_SIZE_COUNT][GGML_TYPE_COUNT][mul_mat_vec_max_cols];

    vk_pipeline pipeline_mul_mat_vec_p021_f16_f32[p021_max_gqa_ratio];
    vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
    vk_pipeline pipeline_get_rows[GGML_TYPE_COUNT];
    vk_pipeline pipeline_get_rows_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_acc_f32;

    // [src0 0=fp32,1=fp16][src1 0=fp32,1=fp16][dst 0=fp32,1=fp16]
    vk_pipeline pipeline_add[2][2][2];
    vk_pipeline pipeline_add_norepeat[2][2][2];
    vk_pipeline pipeline_sub[2][2][2];
    vk_pipeline pipeline_sub_norepeat[2][2][2];
    vk_pipeline pipeline_mul[2][2][2];
    vk_pipeline pipeline_mul_norepeat[2][2][2];
    vk_pipeline pipeline_div[2][2][2];
    vk_pipeline pipeline_div_norepeat[2][2][2];
    vk_pipeline pipeline_add_rms[2][2][2];
    vk_pipeline pipeline_add_rms_norepeat[2][2][2];

    // indexed by num_additional_fused_ops == num_adds - 1
    vk_pipeline pipeline_multi_add[MAX_FUSED_ADDS];
    vk_pipeline pipeline_multi_add_rms[MAX_FUSED_ADDS];

    vk_pipeline pipeline_add_id_f32;

    vk_pipeline pipeline_concat_f32, pipeline_concat_f16, pipeline_concat_i32;
    vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32;
    vk_pipeline pipeline_scale_f32;
    vk_pipeline pipeline_sqr_f32;
    vk_pipeline pipeline_sqrt_f32;
    vk_pipeline pipeline_sin_f32;
    vk_pipeline pipeline_cos_f32;
    vk_pipeline pipeline_clamp_f32;
    vk_pipeline pipeline_pad_f32;
    vk_pipeline pipeline_roll_f32;
    vk_pipeline pipeline_repeat_f32, pipeline_repeat_back_f32;
    vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16, pipeline_cpy_f16_f32, pipeline_cpy_f32_bf16, pipeline_cpy_f32_i32, pipeline_cpy_i32_f32;
    vk_pipeline pipeline_contig_cpy_f32_f32, pipeline_contig_cpy_f32_f16, pipeline_contig_cpy_f16_f16, pipeline_contig_cpy_f16_f32, pipeline_contig_cpy_f32_bf16, pipeline_contig_cpy_f32_i32, pipeline_contig_cpy_i32_f32;
    vk_pipeline pipeline_cpy_f32_quant[GGML_TYPE_COUNT];
    vk_pipeline pipeline_cpy_quant_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_set_rows_i32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_set_rows_i64[GGML_TYPE_COUNT];
    vk_pipeline pipeline_norm_f32;
    vk_pipeline pipeline_group_norm_f32;
    vk_pipeline pipeline_rms_norm_f32;
    vk_pipeline pipeline_rms_norm_mul_f32;
    vk_pipeline pipeline_rms_norm_partials_f32;
    vk_pipeline pipeline_rms_norm_mul_partials_f32;
    vk_pipeline pipeline_rms_norm_back_f32;
    vk_pipeline pipeline_l2_norm_f32;

    // [src/dst 0=fp32,1=fp16]
    vk_pipeline pipeline_exp[2];
    vk_pipeline pipeline_gelu[2];
    vk_pipeline pipeline_gelu_erf[2];
    vk_pipeline pipeline_gelu_quick[2];
    vk_pipeline pipeline_silu[2];
    vk_pipeline pipeline_relu[2];
    vk_pipeline pipeline_tanh[2];
    vk_pipeline pipeline_sigmoid[2];
    vk_pipeline pipeline_hardsigmoid[2];
    vk_pipeline pipeline_hardswish[2];

    vk_pipeline pipeline_geglu[2];
    vk_pipeline pipeline_reglu[2];
    vk_pipeline pipeline_swiglu[2];
    vk_pipeline pipeline_swiglu_oai[2];
    vk_pipeline pipeline_geglu_erf[2];
    vk_pipeline pipeline_geglu_quick[2];

    vk_pipeline pipeline_leaky_relu_f32;
    vk_pipeline pipeline_silu_back_f32;
    vk_pipeline pipeline_diag_mask_inf_f32;
    vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
    vk_pipeline pipeline_soft_max_f32_wg512, pipeline_soft_max_f32_f16_wg512;
    vk_pipeline pipeline_soft_max_back_f32;
    vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16, pipeline_rope_norm_f32_f16;
    vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16, pipeline_rope_neox_f32_f16;
    vk_pipeline pipeline_rope_multi_f32, pipeline_rope_multi_f16;
    vk_pipeline pipeline_rope_vision_f32, pipeline_rope_vision_f16;
    vk_pipeline pipeline_argsort_f32[num_argsort_pipelines];
    vk_pipeline pipeline_sum_rows_f32;
    vk_pipeline pipeline_argmax_f32;
    vk_pipeline pipeline_count_equal_i32;
    vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16;
    vk_pipeline pipeline_im2col_3d_f32, pipeline_im2col_3d_f32_f16;
    vk_pipeline pipeline_timestep_embedding_f32;
    vk_pipeline pipeline_conv_transpose_1d_f32;
    vk_pipeline pipeline_pool2d_f32;
    vk_pipeline pipeline_rwkv_wkv6_f32;
    vk_pipeline pipeline_rwkv_wkv7_f32;
    vk_pipeline pipeline_ssm_scan_f32_d128;
    vk_pipeline pipeline_ssm_scan_f32_d256;
    vk_pipeline pipeline_ssm_conv_f32;
    vk_pipeline pipeline_opt_step_adamw_f32;
    vk_pipeline pipeline_opt_step_sgd_f32;
    vk_pipeline pipeline_conv2d_f32[CONV_SHAPE_COUNT];
    vk_pipeline pipeline_conv2d_f16_f32[CONV_SHAPE_COUNT];
    vk_pipeline pipeline_conv_transpose_2d_f32[CONV_SHAPE_COUNT];
    vk_pipeline pipeline_conv_transpose_2d_f16_f32[CONV_SHAPE_COUNT];
    vk_pipeline pipeline_conv2d_dw_whcn_f32, pipeline_conv2d_dw_whcn_f16_f32;
    vk_pipeline pipeline_conv2d_dw_cwhn_f32, pipeline_conv2d_dw_cwhn_f16_f32;

    std::map<vk_fa_pipeline_state, vk_pipeline> pipeline_flash_attn_f32_f16[GGML_TYPE_COUNT];

    vk_pipeline pipeline_flash_attn_split_k_reduce;

    vk_pipeline pipeline_topk_moe[num_topk_moe_pipelines][TOPK_MOE_COUNT];

    std::vector<vk_pipeline_ref> all_pipelines;

    std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;

    vk::Fence fence;
    vk_buffer sync_staging;

    ggml_backend_buffer_type buffer_type;

    bool disable_fusion;
    bool disable_host_visible_vidmem;
    bool allow_sysmem_fallback;
    bool disable_graph_optimize;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    std::unique_ptr<vk_memory_logger> memory_logger;
#endif

    // for GGML_VK_PERF_LOGGER
    std::unique_ptr<vk_perf_logger> perf_logger;
    vk::QueryPool query_pool;
    int32_t num_queries;

    ~vk_device_struct() {
        VK_LOG_DEBUG("destroy device " << name);

        device.destroyFence(fence);

        ggml_vk_destroy_buffer(sync_staging);

        compute_queue.cmd_pool.destroy(device);
        transfer_queue.cmd_pool.destroy(device);

        for (auto& pipeline : all_pipelines) {
            if (pipeline.expired()) {
                continue;
            }

            vk_pipeline pl = pipeline.lock();
            ggml_vk_destroy_pipeline(device, pl);
        }
        all_pipelines.clear();

        device.destroyDescriptorSetLayout(dsl);

        device.destroy();
    }
};
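// The command pool is created with VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, which
// tells the driver that command buffers allocated from it will be short-lived
// and frequently re-recorded, so it can optimize allocation accordingly.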
void vk_command_pool::init(vk_device& device, vk_queue *q_) {
    cmd_buffer_idx = 0;
    q = q_;

    vk::CommandPoolCreateInfo command_pool_create_info(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), q->queue_family_index);
    pool = device->device.createCommandPool(command_pool_create_info);
}

void vk_command_pool::destroy(vk::Device& device) {
    device.destroyCommandPool(pool);
    pool = nullptr;
    cmd_buffers.clear();
}
struct vk_buffer_struct {
    vk::Buffer buffer = VK_NULL_HANDLE;
    vk::DeviceMemory device_memory = VK_NULL_HANDLE;
    vk::MemoryPropertyFlags memory_property_flags;
    void * ptr;
    size_t size = 0;
    vk::DeviceAddress bda_addr {};

    vk_device device;

    ~vk_buffer_struct() {
        if (size == 0) {
            return;
        }
        VK_LOG_DEBUG("~vk_buffer_struct(" << buffer << ", " << size << ")");

        device->device.freeMemory(device_memory);
        device->device.destroyBuffer(buffer);
    }
};

struct vk_subbuffer {
    vk_buffer buffer;
    uint64_t offset;
    uint64_t size;

    operator vk::DescriptorBufferInfo() const {
        return { buffer->buffer, offset, size };
    }
};
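// The implicit conversion above lets a vk_subbuffer be passed directly
// anywhere a vk::DescriptorBufferInfo is expected (e.g. when writing
// descriptor sets).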
struct vk_semaphore {
    vk::Semaphore s;
    uint64_t value;
};

struct vk_submission {
    vk::CommandBuffer buffer;
    std::vector<vk_semaphore> wait_semaphores;
    std::vector<vk_semaphore> signal_semaphores;
};

typedef std::vector<vk_submission> vk_sequence;
struct vk_mat_mat_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t k_split;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
    uint32_t padded_N;
};

struct vk_mat_vec_push_constants {
    uint32_t ncols;
    uint32_t stride_a;
    uint32_t stride_b;
    uint32_t stride_d;
    uint32_t batch_stride_a;
    uint32_t batch_stride_b;
    uint32_t batch_stride_d;
    uint32_t enable_bias;
    uint32_t ne02;
    uint32_t ne12;
    uint32_t broadcast2;
    uint32_t broadcast3;
};

struct vk_mat_mat_id_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11;
    uint32_t padded_N;
};

struct vk_mat_vec_id_push_constants {
    uint32_t ncols;
    uint32_t stride_a;
    uint32_t stride_b;
    uint32_t stride_d;
    uint32_t batch_stride_a;
    uint32_t batch_stride_b;
    uint32_t batch_stride_d;
    uint32_t enable_bias;
    uint32_t nei0;
    uint32_t ne11;
};
struct vk_flash_attn_push_constants {
    uint32_t N;
    uint32_t KV;

    uint32_t ne1;
    uint32_t ne2;
    uint32_t ne3;

    uint32_t neq2;
    uint32_t neq3;
    uint32_t nek2;
    uint32_t nek3;
    uint32_t nev2;
    uint32_t nev3;
    uint32_t nem1;
    uint32_t nem2;
    uint32_t nem3;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb03;
    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;
    uint32_t nb21;
    uint32_t nb22;
    uint32_t nb23;

    float scale;
    float max_bias;
    float logit_softcap;

    uint32_t mask_n_head_log2;
    float m0;
    float m1;

    uint32_t gqa_ratio;
    uint32_t split_kv;
    uint32_t k_num;
};
static_assert(sizeof(vk_flash_attn_push_constants) <= 128, "sizeof(vk_flash_attn_push_constants) must be <= 128");
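// 128 bytes is the minimum maxPushConstantsSize the Vulkan spec guarantees, so
// staying at or under it keeps these structs usable on every conformant
// implementation (the same bound is asserted for vk_op_unary_push_constants
// below).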
struct vk_op_push_constants {
    uint32_t KX;
    uint32_t KY;
    float param1;
    float param2;
};

struct vk_op_glu_push_constants {
    uint32_t N;
    uint32_t ne00;
    uint32_t ne20;
    uint32_t mode;  // 0: default, 1: swapped, 2: split
    float alpha;    // for swiglu_oai
    float limit;
};

struct vk_op_unary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t misalign_offsets;
    float param1; float param2;

    uint32_t ne0_012mp; uint32_t ne0_012L;
    uint32_t ne0_01mp;  uint32_t ne0_01L;
    uint32_t ne0_0mp;   uint32_t ne0_0L;
    uint32_t ne1_012mp; uint32_t ne1_012L;
    uint32_t ne1_01mp;  uint32_t ne1_01L;
    uint32_t ne1_0mp;   uint32_t ne1_0L;
};
static_assert(sizeof(vk_op_unary_push_constants) <= 128, "sizeof(vk_op_unary_push_constants) must be <= 128");

static vk_op_unary_push_constants vk_op_unary_push_constants_init(const ggml_tensor * src0, const ggml_tensor * dst, int64_t ne = 0) {
    GGML_ASSERT(ne != 0 || (ggml_nelements(src0) == ggml_nelements(dst)));
    ne = ne != 0 ? ne : ggml_nelements(dst);
    GGML_ASSERT(ne <= (int64_t)std::numeric_limits<uint32_t>::max());

    vk_op_unary_push_constants p{};
    p.ne = (uint32_t)ne;

    size_t src0_tsize = ggml_type_size(src0->type);
    p.ne00 = (uint32_t)src0->ne[0];
    p.ne01 = (uint32_t)src0->ne[1];
    p.ne02 = (uint32_t)src0->ne[2];
    p.ne03 = (uint32_t)src0->ne[3];
    p.nb00 = (uint32_t)(src0->nb[0] / src0_tsize);
    p.nb01 = (uint32_t)(src0->nb[1] / src0_tsize);
    p.nb02 = (uint32_t)(src0->nb[2] / src0_tsize);
    p.nb03 = (uint32_t)(src0->nb[3] / src0_tsize);

    size_t dst_tsize = ggml_type_size(dst->type);
    p.ne10 = (uint32_t)dst->ne[0];
    p.ne11 = (uint32_t)dst->ne[1];
    p.ne12 = (uint32_t)dst->ne[2];
    p.ne13 = (uint32_t)dst->ne[3];
    p.nb10 = (uint32_t)(dst->nb[0] / dst_tsize);
    p.nb11 = (uint32_t)(dst->nb[1] / dst_tsize);
    p.nb12 = (uint32_t)(dst->nb[2] / dst_tsize);
    p.nb13 = (uint32_t)(dst->nb[3] / dst_tsize);

    return p; // offsets are initialized later in ggml_vk_op
}
struct vk_op_pad_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t misalign_offsets;

    uint32_t lp0; uint32_t rp0;
    uint32_t lp1; uint32_t rp1;
    uint32_t lp2; uint32_t rp2;
    uint32_t lp3; uint32_t rp3;
};

static vk_op_pad_push_constants vk_op_pad_push_constants_init(const ggml_tensor * src0, const ggml_tensor * dst) {
    int64_t ne = ggml_nelements(dst);
    GGML_ASSERT(ne <= (int64_t)std::numeric_limits<uint32_t>::max());

    vk_op_pad_push_constants p{};
    p.ne = (uint32_t)ne;

    size_t src0_tsize = ggml_type_size(src0->type);
    p.ne00 = (uint32_t)src0->ne[0];
    p.ne01 = (uint32_t)src0->ne[1];
    p.ne02 = (uint32_t)src0->ne[2];
    p.ne03 = (uint32_t)src0->ne[3];
    p.nb00 = (uint32_t)(src0->nb[0] / src0_tsize);
    p.nb01 = (uint32_t)(src0->nb[1] / src0_tsize);
    p.nb02 = (uint32_t)(src0->nb[2] / src0_tsize);
    p.nb03 = (uint32_t)(src0->nb[3] / src0_tsize);

    size_t dst_tsize = ggml_type_size(dst->type);
    p.ne10 = (uint32_t)dst->ne[0];
    p.ne11 = (uint32_t)dst->ne[1];
    p.ne12 = (uint32_t)dst->ne[2];
    p.ne13 = (uint32_t)dst->ne[3];
    p.nb10 = (uint32_t)(dst->nb[0] / dst_tsize);
    p.nb11 = (uint32_t)(dst->nb[1] / dst_tsize);
    p.nb12 = (uint32_t)(dst->nb[2] / dst_tsize);
    p.nb13 = (uint32_t)(dst->nb[3] / dst_tsize);

    p.lp0 = dst->op_params[0];
    p.rp0 = dst->op_params[1];
    p.lp1 = dst->op_params[2];
    p.rp1 = dst->op_params[3];
    p.lp2 = dst->op_params[4];
    p.rp2 = dst->op_params[5];
    p.lp3 = dst->op_params[6];
    p.rp3 = dst->op_params[7];

    return p; // fastdiv values and offsets are initialized later in ggml_vk_op
}
// See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1.
// Precompute mp (m' in the paper) and L such that division
// can be computed using a multiply (high 32b of 64b result)
// and a shift:
//
// n/d = (mulhi(n, mp) + n) >> L;
static void init_fastdiv_values(uint32_t d, uint32_t &mp, uint32_t &L)
{
    // compute L = ceil(log2(d));
    L = 0;
    while (L < 32 && (uint32_t{1} << L) < d) {
        L++;
    }

    mp = (uint32_t)((uint64_t{1} << 32) * ((uint64_t{1} << L) - d) / d + 1);
}
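// Worked example (illustrative): d = 5 gives L = 3 and
// mp = (2^32 * (8 - 5)) / 5 + 1 = 2576980378. Then for n = 99:
// mulhi(99, mp) = floor(99 * 2576980378 / 2^32) = 59, and
// (59 + 99) >> 3 = 19 == 99 / 5.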
template <typename T> void init_pushconst_fastdiv(T &p) {
    GGML_UNUSED(p);
    static_assert(!std::is_const<T>::value, "unexpected type");
}

template <> void init_pushconst_fastdiv(vk_op_unary_push_constants &p) {
    // Compute magic values to divide by these six numbers.
    init_fastdiv_values(p.ne02*p.ne01*p.ne00, p.ne0_012mp, p.ne0_012L);
    init_fastdiv_values(p.ne01*p.ne00,        p.ne0_01mp,  p.ne0_01L);
    init_fastdiv_values(p.ne00,               p.ne0_0mp,   p.ne0_0L);
    init_fastdiv_values(p.ne12*p.ne11*p.ne10, p.ne1_012mp, p.ne1_012L);
    init_fastdiv_values(p.ne11*p.ne10,        p.ne1_01mp,  p.ne1_01L);
    init_fastdiv_values(p.ne10,               p.ne1_0mp,   p.ne1_0L);
}
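// These magic pairs let the shaders decompose a flat element index into
// (i3, i2, i1, i0) coordinates without integer division; e.g. i / ne00 is
// evaluated as (mulhi(i, ne0_0mp) + i) >> ne0_0L.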
struct vk_op_binary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23;
    uint32_t misalign_offsets;
    float param1; float param2; int32_t param3;
};

struct vk_op_multi_add_push_constants {
    // shape for dst
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23;

    // strides for srcs+dst
    uint32_t nb[MAX_PARAMETER_COUNT][4];

    uint32_t rms_partials;
};
// update multi_add.comp if this changes
static_assert(MAX_PARAMETER_COUNT == 12);
static_assert(sizeof(vk_op_multi_add_push_constants) <= 256);
struct vk_op_topk_moe_push_constants {
    uint32_t n_rows;
    uint32_t n_expert_used;
    float clamp_min;
    float clamp_max;
};

struct vk_op_add_id_push_constants {
    uint32_t ne0;
    uint32_t ne1;
    uint32_t s01;
    uint32_t s02;
    uint32_t s11;
    uint32_t s21;
};

struct vk_op_diag_mask_push_constants {
    uint32_t ncols;
    uint32_t rows_per_channel;
    int32_t n_past;
};

struct vk_op_rope_push_constants {
    uint32_t ncols;
    uint32_t n_dims;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[2];
    float theta_scale;
    uint32_t has_ff;
    uint32_t ne02;
    uint32_t s1;
    uint32_t s2;
    int32_t sections[4];
    uint32_t is_imrope;
    uint32_t is_back;
    uint32_t set_rows_stride;
};
struct vk_op_soft_max_push_constants {
    uint32_t KX;
    uint32_t KY;
    uint32_t ne00;
    uint32_t ne01;
    uint32_t ne02;
    uint32_t ne12;
    uint32_t ne13;
    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;
    float scale;
    float max_bias;
    float m0;
    float m1;
    uint32_t n_head_log2;
    uint32_t nrows_x;
    uint32_t has_sinks;
};

struct vk_op_argsort_push_constants {
    uint32_t ncols;
    uint32_t nrows;
    int32_t order;
};
struct vk_op_im2col_push_constants {
    uint64_t dst_addr;
    uint32_t batch_offset; uint32_t offset_delta;
    uint32_t IC;
    uint32_t IW; uint32_t IH;
    uint32_t OW; uint32_t OH;
    uint32_t KW; uint32_t KH;
    uint32_t pelements;
    uint32_t CHW;
    int32_t s0; int32_t s1;
    int32_t p0; int32_t p1;
    int32_t d0; int32_t d1;
};

struct vk_op_im2col_3d_push_constants {
    uint64_t dst_addr;
    uint32_t nb10;
    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;
    uint32_t s0;
    uint32_t s1;
    uint32_t s2;
    uint32_t p0;
    uint32_t p1;
    uint32_t p2;
    uint32_t d0;
    uint32_t d1;
    uint32_t d2;
    uint32_t IW;
    uint32_t IH;
    uint32_t ID;
    uint32_t IC;
    uint32_t KW;
    uint32_t OH;
    uint32_t KD_KH_KW;
    uint32_t KH_KW;
    uint32_t IC_KD_KH_KW;
    uint32_t N_OD_OH;
    uint32_t OD_OH;
    uint32_t OD_OH_OW_IC_KD_KH_KW;
    uint32_t OH_OW_IC_KD_KH_KW;
    uint32_t OW_IC_KD_KH_KW;
    uint32_t misalign_offsets;
};
struct vk_op_timestep_embedding_push_constants {
    uint32_t nb1;
    uint32_t dim;
    uint32_t max_period;
};

struct vk_op_conv_transpose_1d_push_constants {
    uint32_t Cout;
    uint32_t Cin;
    uint32_t K;
    uint32_t L;
    uint32_t KL;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb11;
    uint32_t nb1;

    int32_t s0;
};

struct vk_op_pool2d_push_constants {
    uint32_t IW; uint32_t IH;
    uint32_t OW; uint32_t OH;
    uint32_t OC;
    uint32_t pelements;
    uint32_t op;
    int32_t k0; int32_t k1;
    int32_t s0; int32_t s1;
    int32_t p0; int32_t p1;
};

struct vk_op_rwkv_wkv6_push_constants {
    uint32_t B;
    uint32_t T;
    uint32_t C;
    uint32_t H;
};

struct vk_op_rwkv_wkv7_push_constants {
    uint32_t B;
    uint32_t T;
    uint32_t C;
    uint32_t H;
};

struct vk_op_ssm_scan_push_constants {
    uint32_t nb02, nb03, nb12, nb13;
    uint32_t nb21, nb22, nb31;
    uint32_t nb42, nb43, nb52, nb53;
    uint32_t s_off;
    uint32_t n_head, d_head, n_group, n_tok;
};

struct vk_op_ssm_conv_push_constants {
    uint32_t nb01, nb02;
    uint32_t nb11;
    uint32_t dst_nb0, dst_nb1, dst_nb2;
    uint32_t nc, ncs, nr, n_t, n_s;
};
struct vk_op_conv2d_push_constants {
    uint32_t Cout;
    uint32_t Cin;
    uint32_t N;

    uint32_t KW;
    uint32_t KH;
    uint32_t W;
    uint32_t H;
    uint32_t OW;
    uint32_t OH;

    uint32_t s0;
    uint32_t s1;
    uint32_t p0;
    uint32_t p1;
    uint32_t d0;
    uint32_t d1;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb03;

    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;

    uint32_t nb1;
    uint32_t nb2;
    uint32_t nb3;

    // init_fastdiv_values constants for dividing by KW, KW*KH, OW, OW*OH
    uint32_t KWmp;   uint32_t KWL;
    uint32_t KWKHmp; uint32_t KWKHL;
    uint32_t OWmp;   uint32_t OWL;
    uint32_t OWOHmp; uint32_t OWOHL;
};

template <> void init_pushconst_fastdiv(vk_op_conv2d_push_constants &p) {
    // Compute magic values to divide by KW, KW*KH, OW, OW*OH
    init_fastdiv_values(p.KW,      p.KWmp,   p.KWL);
    init_fastdiv_values(p.KW*p.KH, p.KWKHmp, p.KWKHL);
    init_fastdiv_values(p.OW,      p.OWmp,   p.OWL);
    init_fastdiv_values(p.OW*p.OH, p.OWOHmp, p.OWOHL);
}
struct vk_op_conv_transpose_2d_push_constants {
    uint32_t Cout;
    uint32_t Cin;
    uint32_t N;

    uint32_t KW;
    uint32_t KH;
    uint32_t W;
    uint32_t H;
    uint32_t OW;
    uint32_t OH;

    uint32_t s0;
    uint32_t s1;
    uint32_t p0;
    uint32_t p1;
    uint32_t d0;
    uint32_t d1;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb03;

    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;

    uint32_t nb1;
    uint32_t nb2;
    uint32_t nb3;

    // init_fastdiv_values constants for dividing by KW, KW*KH, OW, OW*OH, s0, s1
    uint32_t KWmp;   uint32_t KWL;
    uint32_t KWKHmp; uint32_t KWKHL;
    uint32_t OWmp;   uint32_t OWL;
    uint32_t OWOHmp; uint32_t OWOHL;
    uint32_t s0mp;   uint32_t s0L;
    uint32_t s1mp;   uint32_t s1L;
};

template <> void init_pushconst_fastdiv(vk_op_conv_transpose_2d_push_constants &p) {
    // Compute magic values to divide by KW, KW*KH, OW, OW*OH, s0, s1
    init_fastdiv_values(p.KW,      p.KWmp,   p.KWL);
    init_fastdiv_values(p.KW*p.KH, p.KWKHmp, p.KWKHL);
    init_fastdiv_values(p.OW,      p.OWmp,   p.OWL);
    init_fastdiv_values(p.OW*p.OH, p.OWOHmp, p.OWOHL);
    init_fastdiv_values(p.s0,      p.s0mp,   p.s0L);
    init_fastdiv_values(p.s1,      p.s1mp,   p.s1L);
}
struct vk_op_conv2d_dw_push_constants {
    uint32_t ne;
    uint32_t batches;
    uint32_t channels;
    uint32_t dst_w;
    uint32_t dst_h;
    uint32_t src_w;
    uint32_t src_h;
    uint32_t knl_w;
    uint32_t knl_h;
    int32_t stride_x;
    int32_t stride_y;
    int32_t pad_x;
    int32_t pad_y;
    int32_t dilation_x;
    int32_t dilation_y;
};

struct vk_op_upscale_push_constants {
    uint32_t ne; uint32_t a_offset; uint32_t d_offset;
    uint32_t ne00; uint32_t ne01;
    uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13;
    float sf0; float sf1; float sf2; float sf3;
    float pixel_offset;
};
struct vk_op_sum_rows_push_constants
{
    uint32_t n_cols;
    uint32_t ne01, ne02;
    uint32_t nb01, nb02, nb03;
    uint32_t nb11, nb12, nb13;
    float weight;
    uint32_t misalign_offsets;
    uint32_t ne0_12mp, ne0_12L;
    uint32_t ne0_1mp, ne0_1L;
};
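// Note: the init below divides both src and dst strides by the src element
// size, which assumes src and dst have the same type size (true for the f32
// sum_rows-style ops this is used for).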
static vk_op_sum_rows_push_constants vk_op_sum_rows_push_constants_init(const ggml_tensor * src, const ggml_tensor * dst, int64_t n_cols) {
    uint32_t type_size = (uint32_t)ggml_type_size(src->type);
    vk_op_sum_rows_push_constants p = {};
    p.n_cols = (uint32_t)n_cols;
    p.ne01 = (uint32_t)src->ne[1];
    p.ne02 = (uint32_t)src->ne[2];
    p.nb01 = (uint32_t)src->nb[1] / type_size;
    p.nb02 = (uint32_t)src->nb[2] / type_size;
    p.nb03 = (uint32_t)src->nb[3] / type_size;
    p.nb11 = (uint32_t)dst->nb[1] / type_size;
    p.nb12 = (uint32_t)dst->nb[2] / type_size;
    p.nb13 = (uint32_t)dst->nb[3] / type_size;
    p.weight = 1.0f;
    return p;
}

template <> void init_pushconst_fastdiv(vk_op_sum_rows_push_constants &p) {
    init_fastdiv_values(p.ne01*p.ne02, p.ne0_12mp, p.ne0_12L);
    init_fastdiv_values(p.ne01,        p.ne0_1mp,  p.ne0_1L);
}
// Allow pre-recording command buffers
struct vk_staging_memcpy {
    vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}

    void * dst;
    const void * src;
    size_t n;
};

struct vk_staging_memset {
    vk_staging_memset(void * _dst, uint32_t _val, size_t _n) : dst(_dst), val(_val), n(_n) {}

    void * dst;
    uint32_t val;
    size_t n;
};

struct vk_context_struct {
    vk_submission * s;
    std::vector<vk_sequence> seqs;

    int exit_tensor_idx;

    std::vector<vk_staging_memcpy> in_memcpys;
    std::vector<vk_staging_memcpy> out_memcpys;
    std::vector<vk_staging_memset> memsets;

    vk_command_pool * p {};
};
typedef std::shared_ptr<vk_context_struct> vk_context;
typedef std::weak_ptr<vk_context_struct> vk_context_ref;

struct ggml_vk_garbage_collector {
    std::vector<vk_semaphore> tl_semaphores;
    std::vector<vk_semaphore> semaphores;
    std::vector<vk::Event> events;
    std::vector<vk_context> contexts;
};
#if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
#define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl

static std::string format_size(size_t size) {
    const size_t kib = 1024;
    const size_t mib = kib * 1024;
    const size_t gib = mib * 1024;

    std::ostringstream oss;
    oss << std::fixed << std::setprecision(2);

    if (size >= gib) {
        oss << static_cast<double>(size) / gib << " GiB";
    } else if (size >= mib) {
        oss << static_cast<double>(size) / mib << " MiB";
    } else if (size >= kib) {
        oss << static_cast<double>(size) / kib << " KiB";
    } else {
        oss << size << " B";
    }

    return oss.str();
}
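// e.g. format_size(3407872) returns "3.25 MiB", and format_size(512) returns "512 B".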
class vk_memory_logger {
public:
    vk_memory_logger(): total_device(0), total_host(0) {}
    void log_allocation(vk_buffer_ref buf_ref, size_t size);
    void log_deallocation(vk_buffer_ref buf_ref);

private:
    std::map<vk::Buffer, size_t> allocations; // Track allocations
    size_t total_device;
    size_t total_host;
};
#else
#define VK_LOG_MEMORY(msg) ((void) 0)
#endif // GGML_VULKAN_MEMORY_DEBUG
class vk_perf_logger {
public:
    void print_timings() {
        if (timings.empty()) {
            return;
        }
        uint64_t total_all_op_times = 0;
        std::cerr << "----------------\nVulkan Timings:" << std::endl;
        for (const auto & t : timings) {
            uint64_t total_op_times = 0;
            for (const auto & time : t.second) {
                total_op_times += time;
            }
            std::cerr << t.first << ": " << t.second.size() << " x " << (total_op_times / t.second.size() / 1000.0)
                      << " us";

            // If we have as many flops entries as timing entries for the op, then compute and log the FLOP/s.
            auto it = flops.find(t.first);
            if (it != flops.end() && (it->second).size() == t.second.size()) {
                uint64_t total_op_flops = 0;
                for (const auto & elem : it->second) {
                    total_op_flops += elem;
                }
                std::cerr << " ("
                          << (double(total_op_flops) / (1000.0 * 1000.0 * 1000.0)) /
                                 (double(total_op_times) / (1000.0 * 1000.0 * 1000.0))
                          << " GFLOP/s)";
            }

            total_all_op_times += total_op_times;

            std::cerr << std::endl;
        }

        if (timings.size() > 0) {
            std::cerr << "Total time: " << total_all_op_times / 1000.0 << " us." << std::endl;
        }

        timings.clear();
        flops.clear();
    }
    void log_timing(const ggml_tensor * node, uint64_t time) {
        if (node->op == GGML_OP_UNARY) {
            timings[ggml_unary_op_name(ggml_get_unary_op(node))].push_back(time);
            return;
        }
        if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
            const uint64_t m = node->src[0]->ne[1];
            const uint64_t n = node->ne[1];
            const uint64_t k = node->src[1]->ne[0];
            const uint64_t batch = node->src[1]->ne[2] * node->src[1]->ne[3];
            std::string name = ggml_op_name(node->op);
            if ((node->op == GGML_OP_MUL_MAT && n <= mul_mat_vec_max_cols) ||
                (node->op == GGML_OP_MUL_MAT_ID && node->src[2]->ne[1] == 1)) {
                name += "_VEC";
            }
            name += " ";
            name += ggml_type_name(node->src[0]->type);
            name += " m=" + std::to_string(m) + " n=" + std::to_string(n) + " k=" + std::to_string(k);
            if (batch > 1) {
                name += " batch=" + std::to_string(batch);
            }
            timings[name].push_back(time);
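            // Each output element costs k multiplies and k-1 additions, i.e. m*n*(2k-1) FLOPs per batch.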
            flops[name].push_back(m * n * (k + (k - 1)) * batch);
            return;
        }
        if (node->op == GGML_OP_CONV_2D || node->op == GGML_OP_CONV_TRANSPOSE_2D) {
            std::string name = ggml_op_name(node->op);
            ggml_tensor * knl = node->src[0];
            uint64_t OW = node->ne[0];
            uint64_t OH = node->ne[1];
            uint64_t N = node->ne[3];
            uint64_t Cout = node->ne[2];
            uint64_t KW = knl->ne[0];
            uint64_t KH = knl->ne[1];
            uint64_t Cin = node->src[1]->ne[2];
            // KxCRS @ CRSxNPQ = KxNPQ -> M=K, K=CRS, N=NPQ
            uint64_t size_M = Cout;
            uint64_t size_K = Cin * KW * KH;
            uint64_t size_N = N * OW * OH;
            uint64_t n_flops = size_M * size_N * (size_K + (size_K - 1));
            name += " M=Cout=" + std::to_string(size_M) + ", K=Cin*KW*KH=" + std::to_string(size_K) +
                    ", N=N*OW*OH=" + std::to_string(size_N);
            flops[name].push_back(n_flops);
            timings[name].push_back(time);
            return;
        }
        if (node->op == GGML_OP_RMS_NORM) {
            std::string name = ggml_op_name(node->op);
            name += "(" + std::to_string(node->ne[0]) + "," + std::to_string(node->ne[1]) + "," +
                    std::to_string(node->ne[2]) + "," + std::to_string(node->ne[3]) + ")";
            timings[name].push_back(time);
            return;
        }
        timings[ggml_op_name(node->op)].push_back(time);
    }
private:
    std::map<std::string, std::vector<uint64_t>> timings;
    std::map<std::string, std::vector<uint64_t>> flops;
};
struct ggml_backend_vk_context {
    std::string name;

    vk_device device;

    size_t semaphore_idx, event_idx;
    ggml_vk_garbage_collector gc;
    size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k, prealloc_size_add_rms_partials, prealloc_size_add_rms_partials_offset;
    vk_buffer prealloc_x, prealloc_y, prealloc_split_k, prealloc_add_rms_partials;
    vk::Fence fence, almost_ready_fence;
    bool almost_ready_fence_pending {};
    // Set before op_add and unset after op_rms_norm to indicate that the add should
    // write partial sums to accumulate the square of the vector components
    bool do_add_rms_partials;

    // Cache most recent tensor that was converted into prealloc_y, and what pipeline it used to convert.
    vk_pipeline_struct * prealloc_y_last_pipeline_used {};
    const ggml_tensor * prealloc_y_last_tensor_used {};

    // Track which nodes have been used since the last sync, and whether they were written to
    std::vector<const ggml_tensor *> unsynced_nodes_written;
    std::vector<const ggml_tensor *> unsynced_nodes_read;

    // Track which prealloc buffers have pending reads that need to be synchronized.
    // These are checked before writing to the buffer (and call ggml_vk_sync_buffers if set),
    // and set to true after the buffer contents are consumed.
    bool prealloc_x_need_sync, prealloc_y_need_sync, prealloc_split_k_need_sync;

    vk_context_ref compute_ctx;
    vk_context_ref transfer_ctx;

    std::vector<vk_context_ref> tensor_ctxs;

    std::vector<vk::DescriptorPool> descriptor_pools;
    std::vector<vk::DescriptorSet> descriptor_sets;
    uint32_t descriptor_set_idx {};
    uint32_t pipeline_descriptor_set_requirements {};

    vk_command_pool compute_cmd_pool;
    vk_command_pool transfer_cmd_pool;

    // number of additional consecutive nodes that are being fused with the
    // node currently being processed
    int num_additional_fused_ops {};
    // Bitmask of which fused ops need to write an intermediate value to memory.
    // Bit 'i' means nodes[start_of_fusion + i] writes to memory.
    // If there's no fusion, bit 0 is still set.
    int fused_ops_write_mask {};
};
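// Tensor "data" pointers for Vulkan buffers are fake host addresses: vk_ptr_base
// plus the byte offset of the tensor within its device buffer. vk_tensor_offset()
// recovers that offset, following view_src for views.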
static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT

static uint64_t vk_tensor_offset(const ggml_tensor * tensor) {
    if (tensor->view_src) {
        return (uint8_t *) tensor->view_src->data - (uint8_t *) vk_ptr_base;
    }
    return (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
}
struct ggml_backend_vk_buffer_context {
    vk_device_ref device;
    vk_buffer dev_buffer;
    std::string name;

    ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
        device(device),
        dev_buffer(dev_buffer),
        name(name) {
    }

    ~ggml_backend_vk_buffer_context() {
        ggml_vk_destroy_buffer(dev_buffer);
    }
};
#ifdef GGML_VULKAN_MEMORY_DEBUG
static std::mutex log_mutex;

void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    const std::string type = device ? "device" : "host";
    allocations[buf->buffer] = size;
    total_device += device ? size : 0;
    total_host += device ? 0 : size;
    VK_LOG_MEMORY(buf->device->name << ": +" << format_size(size) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
}

void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
    if (buf_ref.expired() || buf_ref.lock()->size == 0) {
        return;
    }

    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    std::string type = device ? "device" : "host";
    auto it = allocations.find(buf->buffer);
    if (it != allocations.end()) {
        // Only dereference the iterator after the lookup succeeded.
        total_device -= device ? it->second : 0;
        total_host -= device ? 0 : it->second;
        VK_LOG_MEMORY(buf->device->name << ": -" << format_size(it->second) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
        allocations.erase(it);
    } else {
        VK_LOG_MEMORY("ERROR " << buf->device->name << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
    }
}
#endif // GGML_VULKAN_MEMORY_DEBUG
struct vk_instance_t {
    vk::Instance instance;

    bool debug_utils_support = false; // VK_EXT_debug_utils enabled
    PFN_vkSetDebugUtilsObjectNameEXT pfn_vkSetDebugUtilsObjectNameEXT = {};
    PFN_vkQueueBeginDebugUtilsLabelEXT pfn_vkQueueBeginDebugUtilsLabelEXT = {};
    PFN_vkQueueEndDebugUtilsLabelEXT pfn_vkQueueEndDebugUtilsLabelEXT = {};
    PFN_vkCmdBeginDebugUtilsLabelEXT pfn_vkCmdBeginDebugUtilsLabelEXT = {};
    PFN_vkCmdEndDebugUtilsLabelEXT pfn_vkCmdEndDebugUtilsLabelEXT = {};
    PFN_vkCmdInsertDebugUtilsLabelEXT pfn_vkCmdInsertDebugUtilsLabelEXT = {};

    std::vector<size_t> device_indices;
    std::vector<bool> device_supports_membudget;
    vk_device devices[GGML_VK_MAX_DEVICES];
};
static bool vk_instance_initialized = false;
static vk_instance_t vk_instance;

static bool vk_perf_logger_enabled = false;

#ifdef GGML_VULKAN_CHECK_RESULTS
static size_t vk_skip_checks;
static size_t vk_output_tensor;

static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name);
static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx);
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx);
#endif
typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

static void ggml_backend_vk_free(ggml_backend_t backend);

static VkDeviceSize ggml_vk_get_max_buffer_range(const ggml_backend_vk_context * ctx, const vk_buffer &buf, const VkDeviceSize offset) {
    const VkDeviceSize range = std::min(VkDeviceSize{buf->size - offset},
                                        VkDeviceSize{ctx->device->properties.limits.maxStorageBufferRange});
    return range;
}
// Wait for ctx->fence to be signaled.
static void ggml_vk_wait_for_fence(ggml_backend_vk_context * ctx) {
    // Use waitForFences while most of the graph executes. Hopefully the CPU can sleep
    // during this wait.
    if (ctx->almost_ready_fence_pending) {
        VK_CHECK(ctx->device->device.waitForFences({ ctx->almost_ready_fence }, true, UINT64_MAX), "almost_ready_fence");
        ctx->device->device.resetFences({ ctx->almost_ready_fence });
        ctx->almost_ready_fence_pending = false;
    }

    // Spin (w/pause) waiting for the graph to finish executing.
    vk::Result result;
    while ((result = ctx->device->device.getFenceStatus(ctx->fence)) != vk::Result::eSuccess) {
        if (result != vk::Result::eNotReady) {
            fprintf(stderr, "ggml_vulkan: error %s at %s:%d\n", to_string(result).c_str(), __FILE__, __LINE__);
            exit(1);
        }
        for (uint32_t i = 0; i < 100; ++i) {
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
        }
    }
    ctx->device->device.resetFences({ ctx->fence });
}
// variables to track number of compiles in progress
static uint32_t compile_count = 0;
static std::mutex compile_count_mutex;
static std::condition_variable compile_count_cond;
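// Compile a single compute pipeline. Runs on a worker thread (see the
// std::async call in ggml_vk_load_shaders) and decrements compile_count
// when finished so the dispatcher can start the next compile.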
static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, size_t spv_size, const void* spv_data, const std::string entrypoint,
                                         uint32_t parameter_count, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t> specialization_constants,
                                         bool disable_robustness, bool require_full_subgroups, uint32_t required_subgroup_size) {
    VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << pipeline->name << ", " << entrypoint << ", " << parameter_count <<
                 ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " <<
                 disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")");
    GGML_ASSERT(parameter_count > 0);
    GGML_ASSERT(parameter_count <= MAX_PARAMETER_COUNT);
    GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT

    vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
    pipeline->shader_module = device->device.createShaderModule(shader_module_create_info);

    vk::PushConstantRange pcr(
        vk::ShaderStageFlagBits::eCompute,
        0,
        pipeline->push_constant_size
    );

    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), device->dsl, pcr);
    pipeline->layout = device->device.createPipelineLayout(pipeline_layout_create_info);

    std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());

    for (size_t i = 0; i < specialization_constants.size(); i++) {
        specialization_entries[i].constantID = i;
        specialization_entries[i].offset = i * sizeof(uint32_t);
        specialization_entries[i].size = sizeof(uint32_t);
    }

    vk::SpecializationInfo specialization_info(
        specialization_entries.size(),
        specialization_entries.data(),
        specialization_constants.size() * sizeof(uint32_t),
        specialization_constants.data()
    );

    vk::PipelineShaderStageCreateFlags pipeline_shader_stage_create_flags{};

    if (device->subgroup_require_full_support && require_full_subgroups) {
        pipeline_shader_stage_create_flags |= vk::PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT;
    }

    vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
        pipeline_shader_stage_create_flags,
        vk::ShaderStageFlagBits::eCompute,
        pipeline->shader_module,
        entrypoint.c_str(),
        &specialization_info);

    vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT pipeline_shader_stage_required_subgroup_size_create_info;
    pipeline_shader_stage_required_subgroup_size_create_info.requiredSubgroupSize = required_subgroup_size;
    if (device->subgroup_size_control && required_subgroup_size > 0) {
        GGML_ASSERT(device->subgroup_min_size <= required_subgroup_size && required_subgroup_size <= device->subgroup_max_size);
        pipeline_shader_create_info.setPNext(&pipeline_shader_stage_required_subgroup_size_create_info);
    }

    vk::ComputePipelineCreateInfo compute_pipeline_create_info(
        device->pipeline_executable_properties_support ?
            vk::PipelineCreateFlagBits::eCaptureStatisticsKHR :
            vk::PipelineCreateFlags{},
        pipeline_shader_create_info,
        pipeline->layout);

    vk::PipelineRobustnessCreateInfoEXT rci;

    if (device->pipeline_robustness && disable_robustness) {
        rci.storageBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled;
        rci.uniformBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled;
        compute_pipeline_create_info.setPNext(&rci);
    }

    try {
        pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Compute pipeline creation failed for " << pipeline->name << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
    pipeline->compiled = true;

    if (vk_instance.debug_utils_support) {
        vk::DebugUtilsObjectNameInfoEXT duoni;
        duoni.objectType = vk::ObjectType::ePipeline;
        duoni.pObjectName = pipeline->name.c_str();
        duoni.objectHandle = /*reinterpret_cast*/(uint64_t)(static_cast<VkPipeline>(pipeline->pipeline));
        vk_instance.pfn_vkSetDebugUtilsObjectNameEXT(device->device, &static_cast<VkDebugUtilsObjectNameInfoEXT &>(duoni));
    }

    if (device->pipeline_executable_properties_support) {
        vk::PipelineExecutableInfoKHR executableInfo;
        executableInfo.pipeline = pipeline->pipeline;

        auto statistics = device->device.getPipelineExecutableStatisticsKHR(executableInfo);
        for (auto & s : statistics) {
            // "Register Count" is reported by NVIDIA drivers.
            if (strcmp(s.name, "Register Count") == 0) {
                VK_LOG_DEBUG(pipeline->name << " " << s.name << ": " << s.value.u64 << " registers");
                pipeline->register_count = (uint32_t)s.value.u64;
            }
        }
    }

    {
        std::lock_guard<std::recursive_mutex> guard(device->mutex);
        device->all_pipelines.push_back(pipeline);
    }

    {
        std::lock_guard<std::mutex> guard(compile_count_mutex);
        assert(compile_count > 0);
        compile_count--;
    }
    compile_count_cond.notify_all();
}
static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_vk_destroy_pipeline(" << pipeline->name << ")");
    device.destroyPipelineLayout(pipeline->layout);
    device.destroyShaderModule(pipeline->shader_module);
    device.destroyPipeline(pipeline->pipeline);
}
static void ggml_pipeline_request_descriptor_sets(ggml_backend_vk_context *ctx, vk_pipeline& pipeline, uint32_t n) {
    VK_LOG_DEBUG("ggml_pipeline_request_descriptor_sets(" << pipeline->name << ", " << n << ")");
    ctx->pipeline_descriptor_set_requirements += n;
    if (!pipeline->compiled) {
        pipeline->needed = true;
        ctx->device->need_compiles = true;
    }
}
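// Grow the descriptor set cache to satisfy pipeline_descriptor_set_requirements.
// Sets come from pools of VK_DEVICE_DESCRIPTOR_POOL_SIZE sets each; new pools are
// created on demand and previously allocated sets are reused across graph evals.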
static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx) {
    if (ctx->descriptor_sets.size() >= ctx->pipeline_descriptor_set_requirements) {
        // Enough descriptors are available
        return;
    }

    vk_device& device = ctx->device;

    uint32_t to_alloc = ctx->pipeline_descriptor_set_requirements - ctx->descriptor_sets.size();
    uint32_t pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE - ctx->descriptor_sets.size() % VK_DEVICE_DESCRIPTOR_POOL_SIZE;
    uint32_t pool_idx = ctx->descriptor_sets.size() / VK_DEVICE_DESCRIPTOR_POOL_SIZE;

    while (to_alloc > 0) {
        const uint32_t alloc_count = std::min(pool_remaining, to_alloc);
        to_alloc -= alloc_count;
        pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE;

        if (pool_idx >= ctx->descriptor_pools.size()) {
            vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, MAX_PARAMETER_COUNT * VK_DEVICE_DESCRIPTOR_POOL_SIZE);
            vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size);
            ctx->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
        }

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = device->dsl;
        }
        vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(ctx->descriptor_pools[pool_idx], alloc_count, layouts.data());
        std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
        ctx->descriptor_sets.insert(ctx->descriptor_sets.end(), sets.begin(), sets.end());

        pool_idx++;
    }
}
static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_command_pool& p) {
    VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");

    if (p.cmd_buffers.size() > p.cmd_buffer_idx) {
        // Reuse command buffer
        return p.cmd_buffers[p.cmd_buffer_idx++];
    }

    vk::CommandBufferAllocateInfo command_buffer_alloc_info(
        p.pool,
        vk::CommandBufferLevel::ePrimary,
        1);
    const std::vector<vk::CommandBuffer> cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info);
    auto buf = cmd_buffers.front();

    p.cmd_buffers.push_back(buf);
    p.cmd_buffer_idx++;

    return buf;
}
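// Submit every sequence recorded in ctx with a single vkQueueSubmit call,
// translating each submission's timeline semaphore waits/signals into a
// VkTimelineSemaphoreSubmitInfo chained off the corresponding VkSubmitInfo.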
static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
    if (ctx->seqs.empty()) {
        if (fence) {
            std::lock_guard<std::mutex> guard(queue_mutex);
            ctx->p->q->queue.submit({}, fence);
        }
        return;
    }
    VK_LOG_DEBUG("ggml_vk_submit(" << ctx << ", " << fence << ")");

    std::vector<std::vector<uint64_t>> tl_wait_vals;
    std::vector<std::vector<uint64_t>> tl_signal_vals;
    std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
    std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
    std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
    std::vector<vk::SubmitInfo> submit_infos;
    int idx = -1;
    std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;

    size_t reserve = 0;

    for (const auto& sequence : ctx->seqs) {
        reserve += sequence.size();
    }

    // Pre-reserve vectors to prevent reallocation, which invalidates pointers
    tl_wait_semaphores.reserve(reserve);
    tl_wait_vals.reserve(reserve);
    tl_signal_semaphores.reserve(reserve);
    tl_signal_vals.reserve(reserve);
    tl_submit_infos.reserve(reserve);
    submit_infos.reserve(reserve);
    stage_flags.reserve(reserve);

    for (const auto& sequence : ctx->seqs) {
        for (const auto& submission : sequence) {
            stage_flags.push_back({});
            idx++;
            tl_wait_vals.push_back({});
            tl_wait_semaphores.push_back({});
            tl_signal_vals.push_back({});
            tl_signal_semaphores.push_back({});
            for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
                stage_flags[idx].push_back(ctx->p->q->stage_flags);
                tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
                tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
            }
            for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
                tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
                tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
            }
            tl_submit_infos.push_back({
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_vals[idx].data(),
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_vals[idx].data(),
            });
            tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
            tl_submit_infos[idx].pNext = nullptr;
            vk::SubmitInfo si{
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_semaphores[idx].data(),
                stage_flags[idx].data(),
                1,
                &submission.buffer,
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_semaphores[idx].data(),
            };
            si.setPNext(&tl_submit_infos[idx]);
            submit_infos.push_back(si);
        }
    }

    std::lock_guard<std::mutex> guard(queue_mutex);

    ctx->p->q->queue.submit(submit_infos, fence);

    ctx->seqs.clear();
}
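// Pick a queue family that has the 'required' flags, preferring families that
// also lack the 'avoid' flags and differ from the compute family, then relaxing
// the constraints step by step until something matches.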
static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
    VK_LOG_DEBUG("ggml_vk_find_queue_family_index()");
    const uint32_t qfsize = queue_family_props.size();

    // Try with avoid preferences first
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
            return i;
        }
    }

    // Fall back to only required
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to reusing compute queue
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to ignoring min_num_queues
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations.
    // Thus, if the capabilities of a queue family include VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT, then reporting the VK_QUEUE_TRANSFER_BIT capability separately for that queue family is optional.
    if (compute_index >= 0) {
        return compute_index;
    }

    std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;

    for (auto &q_family : queue_family_props) {
        std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
    }
    abort();
}
static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags, bool transfer_only) {
    VK_LOG_DEBUG("ggml_vk_create_queue()");
    std::lock_guard<std::recursive_mutex> guard(device->mutex);

    q.queue_family_index = queue_family_index;
    q.transfer_only = transfer_only;

    q.cmd_pool.init(device, &q);

    q.queue = device->device.getQueue(queue_family_index, queue_index);

    q.stage_flags = stage_flags;
}

static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_command_pool& p) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_context(" << result << ")");
    ctx->gc.contexts.emplace_back(result);
    result->p = &p;
    return result;
}

static vk_context ggml_vk_create_temporary_context(vk_command_pool& p) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_temporary_context(" << result << ")");
    result->p = &p;
    return result;
}
static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_binary_semaphore()");
    vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
    vk::SemaphoreCreateInfo ci{};
    ci.setPNext(&tci);
    vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
    ctx->gc.semaphores.push_back({ semaphore, 0 });
    return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
}
static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
    if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
        vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
        vk::SemaphoreCreateInfo ci{};
        ci.setPNext(&tci);
        vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
        ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
    }
    return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
}

static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
    if (ctx->event_idx >= ctx->gc.events.size()) {
        ctx->gc.events.push_back(ctx->device->device.createEvent({}));
    }
    return ctx->gc.events[ctx->event_idx++];
}
static void ggml_vk_command_pool_cleanup(vk_device& device, vk_command_pool& p) {
    VK_LOG_DEBUG("ggml_vk_command_pool_cleanup()");

    // Requires command buffers to be done
    device->device.resetCommandPool(p.pool);
    p.cmd_buffer_idx = 0;
}

static void ggml_vk_queue_command_pools_cleanup(vk_device& device) {
    VK_LOG_DEBUG("ggml_vk_queue_command_pools_cleanup()");

    // Arbitrary frequency to cleanup/reuse command buffers
    static constexpr uint32_t cleanup_frequency = 10;

    if (device->compute_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) {
        ggml_vk_command_pool_cleanup(device, device->compute_queue.cmd_pool);
    }
    if (device->transfer_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) {
        ggml_vk_command_pool_cleanup(device, device->transfer_queue.cmd_pool);
    }
}
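// Find a memory type that is permitted by mem_req->memoryTypeBits, has all the
// requested property flags, and whose heap is large enough; UINT32_MAX if none.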
static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            return i;
        }
    }
    return UINT32_MAX;
}
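// Create a buffer and back it with the first memory type from req_flags_list
// that can be found and allocated; later entries act as fallbacks when the
// preferred memory type is unavailable or allocation fails.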
static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std::initializer_list<vk::MemoryPropertyFlags> & req_flags_list) {
    VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags_list.begin()[0]) << ", " << to_string(req_flags_list.begin()[req_flags_list.size()-1]) << ")");
    if (size > device->max_buffer_size) {
        throw vk::OutOfDeviceMemoryError("Requested buffer size exceeds device buffer size limit");
    }

    vk_buffer buf = std::make_shared<vk_buffer_struct>();

    if (size == 0) {
        buf->size = 0;
        return buf;
    }

    vk::BufferUsageFlags usage_flags = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst;
    vk::MemoryAllocateFlags mem_flags {};
    if (device->buffer_device_address) {
        usage_flags |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
        mem_flags |= vk::MemoryAllocateFlagBits::eDeviceAddress;
    }

    vk::BufferCreateInfo buffer_create_info{
        vk::BufferCreateFlags(),
        size,
        usage_flags,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
    };

    buf->buffer = device->device.createBuffer(buffer_create_info);

    vk::MemoryRequirements mem_req = device->device.getBufferMemoryRequirements(buf->buffer);

    vk::PhysicalDeviceMemoryProperties mem_props = device->physical_device.getMemoryProperties();

    const vk::MemoryAllocateFlagsInfo mem_flags_info { mem_flags };

    for (auto it = req_flags_list.begin(); it != req_flags_list.end(); it++) {
        const auto & req_flags = *it;

        uint32_t memory_type_index = find_properties(&mem_props, &mem_req, req_flags);

        if (memory_type_index == UINT32_MAX) {
            continue;
        }
        buf->memory_property_flags = req_flags;

        try {
            buf->device_memory = device->device.allocateMemory({ mem_req.size, memory_type_index, &mem_flags_info });
            break;
        } catch (const vk::SystemError& e) {
            // loop and retry
            // during last attempt throw the exception
            if (it + 1 == req_flags_list.end()) {
                device->device.destroyBuffer(buf->buffer);
                throw e;
            }
        }
    }

    if (!buf->device_memory) {
        device->device.destroyBuffer(buf->buffer);
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");
    }

    buf->ptr = nullptr;

    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = device->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

    device->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);

    buf->device = device;
    buf->size = size;

    if (device->buffer_device_address) {
        const vk::BufferDeviceAddressInfo addressInfo(buf->buffer);
        buf->bda_addr = device->device.getBufferAddress(addressInfo);
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    device->memory_logger->log_allocation(buf, size);
#endif

    return buf;
}
static vk_buffer ggml_vk_create_buffer_check(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(device, size, {req_flags, fallback_flags});
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
}
static vk_buffer ggml_vk_create_buffer_device(vk_device& device, size_t size) {
    vk_buffer buf;
    try {
        if (device->prefer_host_memory) {
            buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent,
                                                       vk::MemoryPropertyFlagBits::eDeviceLocal});
        } else if (device->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal,
                                                       vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});
        } else if (device->disable_host_visible_vidmem) {
            if (device->allow_sysmem_fallback) {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal,
                                                           vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});
            } else {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal});
            }
        } else {
            // use ReBAR if available, otherwise fall back to device-only visible memory
            if (device->allow_sysmem_fallback) {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal | vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent,
                                                           vk::MemoryPropertyFlagBits::eDeviceLocal,
                                                           vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});
            } else {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal | vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent,
                                                           vk::MemoryPropertyFlagBits::eDeviceLocal});
            }
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
    return buf;
}
static void ggml_vk_destroy_buffer(vk_buffer& buf) {
    if (buf == nullptr) {
        return;
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    if (buf->device != nullptr) {
        buf->device->memory_logger->log_deallocation(buf);
    }
#endif

    buf.reset();
}

static vk_subbuffer ggml_vk_subbuffer(const ggml_backend_vk_context* ctx, const vk_buffer& buf, size_t offset = 0) {
    return { buf, offset, ggml_vk_get_max_buffer_range(ctx, buf, offset) };
}
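// Record a full memory barrier on the context's queue (shader and transfer
// access, or transfer-only on transfer queues). Because the barrier orders all
// prior accesses before subsequent ones, any pending prealloc buffer reads are
// now safe and the need_sync flags can be cleared.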
static void ggml_vk_sync_buffers(ggml_backend_vk_context* ctx, vk_context& subctx) {
    VK_LOG_DEBUG("ggml_vk_sync_buffers()");

    const bool transfer_queue = subctx->p->q->transfer_only;

    if (ctx) {
        ctx->prealloc_x_need_sync = ctx->prealloc_y_need_sync = ctx->prealloc_split_k_need_sync = false;
    }

    subctx->s->buffer.pipelineBarrier(
        subctx->p->q->stage_flags,
        subctx->p->q->stage_flags,
        {},
        { {
            { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) },
            { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) }
        } },
        {},
        {}
    );
}

static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events) {
    VK_LOG_DEBUG("ggml_vk_wait_events()");
    if (events.empty()) {
        return;
    }

    ctx->s->buffer.waitEvents(
        events,
        ctx->p->q->stage_flags,
        ctx->p->q->stage_flags,
        {},
        {},
        {}
    );
}
// number of rows/cols for flash attention shader
static constexpr uint32_t flash_attention_num_small_rows = 32;
static constexpr uint32_t scalar_flash_attention_num_small_rows = 1;

static uint32_t get_fa_scalar_num_large_rows(uint32_t hsv) {
    if (hsv >= 192) {
        return 2;
    } else {
        return 8;
    }
}

// The FA coopmat1 shader assumes 16x16x16 matrix multiply support.
// 128 threads split into four subgroups, each subgroup does 1/4
// of the Bc dimension.
static constexpr uint32_t coopmat1_flash_attention_num_large_rows = 16;
static constexpr uint32_t scalar_flash_attention_Bc = 64;
static constexpr uint32_t scalar_flash_attention_workgroup_size = 128;

static uint32_t get_fa_num_small_rows(FaCodePath path) {
    if (path == FA_COOPMAT2) {
        return flash_attention_num_small_rows;
    } else {
        return scalar_flash_attention_num_small_rows;
    }
}
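// Returns the {rows, cols} tile shape (Br, Bc) that one workgroup processes,
// chosen per code path, head sizes, and quantization to balance register and
// shared memory pressure.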
static std::array<uint32_t, 2> fa_rows_cols(FaCodePath path, uint32_t hsk, uint32_t hsv, uint32_t clamp, ggml_type type, bool small_rows) {
    GGML_UNUSED(clamp);
    GGML_UNUSED(hsv);

    if (path == FA_SCALAR) {
        if (small_rows) {
            return {scalar_flash_attention_num_small_rows, 64};
        } else {
            if ((hsv | hsk) & 8) {
                // HSV/HSK not being a multiple of 16 makes D_split smaller, which makes cols_per_iter
                // larger, and Bc needs to be >= cols_per_thread. 64 is large enough, 32 is not.
                return {get_fa_scalar_num_large_rows(hsv), 64};
            } else {
                return {get_fa_scalar_num_large_rows(hsv), 32};
            }
        }
    }

    if (path == FA_COOPMAT1) {
        if (small_rows) {
            return {scalar_flash_attention_num_small_rows, scalar_flash_attention_Bc};
        } else {
            return {coopmat1_flash_attention_num_large_rows, scalar_flash_attention_Bc};
        }
    }

    // small rows, large cols
    if (small_rows) {
        return {get_fa_num_small_rows(FA_COOPMAT2), 32};
    }

    // small cols to reduce register count
    if (ggml_is_quantized(type) || hsk >= 256 || hsv >= 256) {
        if (hsk >= 512 || hsv >= 512) {
            return {32, 32};
        } else {
            return {64, 32};
        }
    }
    return {64, 64};
}

static uint32_t fa_align(FaCodePath path, uint32_t hsk, uint32_t hsv, ggml_type type, bool small_rows) {
    return fa_rows_cols(path, hsk, hsv, 0, type, small_rows)[1];
}
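// Estimate the shared memory footprint of a matmul warptile configuration
// (tile load buffers, mul_mat_id row ids and ballots, coopmat staging, and the
// dequant LUT for IQ/MXFP4 types) and check it against maxComputeSharedMemorySize.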
static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vector<uint32_t>& warptile, bool mul_mat_id, ggml_type src0_type) {
    uint32_t lut_size = 0;
    switch (src0_type) {
    case GGML_TYPE_IQ1_S:
    case GGML_TYPE_IQ1_M:
        lut_size = 2*2048;
        break;
    case GGML_TYPE_IQ2_XXS:
        lut_size = 8*256;
        break;
    case GGML_TYPE_IQ2_XS:
        lut_size = 8*512;
        break;
    case GGML_TYPE_IQ2_S:
        lut_size = 8*1024;
        break;
    case GGML_TYPE_IQ3_XXS:
        lut_size = 4*256;
        break;
    case GGML_TYPE_IQ3_S:
        lut_size = 4*512;
        break;
    case GGML_TYPE_IQ4_NL:
    case GGML_TYPE_IQ4_XS:
    case GGML_TYPE_MXFP4:
        lut_size = 4*16;
        break;
    default:
        break;
    }

    // Needs to be kept up to date on shader changes
    const uint32_t bank_conflict_offset = device->coopmat_support ? 8 : 1;
    const uint32_t type_size = device->fp16 ? sizeof(ggml_fp16_t) : sizeof(float);
    const uint32_t warps = warptile[0] / warptile[10];

    const uint32_t load_bufs = (warptile[1] + warptile[2]) * (warptile[3] + bank_conflict_offset) * type_size;
    const uint32_t mmid_row_ids = mul_mat_id ? (warptile[2] * 2 * sizeof(uint16_t)) : 0;
    const uint32_t coopmat_stage = device->coopmat_support ? warptile[7] * warptile[8] / warps * sizeof(float) : 0;
    const uint32_t ballots_sh = mul_mat_id ? (warps * 4 * sizeof(uint32_t)) : 0;

    const uint32_t total_size = load_bufs + mmid_row_ids + coopmat_stage + lut_size + ballots_sh;
    const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;

    VK_LOG_DEBUG("ggml_vk_matmul_shmem_support(warptile=(" << warptile[0] << "," << warptile[1] << "," << warptile[2] << "), "
                 "mul_mat_id=" << mul_mat_id << ", src0_type=" << ggml_type_name(src0_type) << ", supported=" << supported);

    return supported;
}
struct GpuPipelineConfig {
    // GPU architecture identifier.
    // Example: vk_device_architecture::AMD_GCN
    vk_device_architecture arch;

    // Mapping of pipeline names to their specific subgroup sizes.
    // Example: {"soft_max_f32", 64}
    std::unordered_map<std::string, uint32_t> pipelines;

    // Default subgroup size for this GPU.
    // Defaults to 0 if not explicitly provided.
    uint32_t default_subgroup_size = 0;
};

// Pipeline configuration for RDNA1 GPUs.
static const std::unordered_map<std::string, uint32_t> rdna1_pipelines = {
    {"soft_max", 64}, {"im2col", 64},
    {"argmax", 64}, {"mul_mat_vec", 64},
    {"mul_mat_vec_f16", 32}, {"mul_mat_vec_f32_f16", 32}
};

// Pipeline configuration for RDNA2 GPUs.
static const std::unordered_map<std::string, uint32_t> rdna2_pipelines = {
    {"soft_max", 64}, {"im2col", 64},
};

static constexpr uint32_t RDNA_DEFAULT_SUBGROUP_SIZE = 32;

// Define configurations for different GPUs.
static std::vector<GpuPipelineConfig> gpu_pipeline_configs = {
    {
        vk_device_architecture::AMD_RDNA1,
        {
            rdna1_pipelines,
        },
        RDNA_DEFAULT_SUBGROUP_SIZE
    },
    {
        vk_device_architecture::AMD_RDNA2,
        {
            rdna2_pipelines,
        },
        RDNA_DEFAULT_SUBGROUP_SIZE
    },
};
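// Resolve the required subgroup size for a pipeline on this architecture:
// exact name match first, then the longest substring match, then the
// architecture default. 0 means "no override".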
static uint32_t get_subgroup_size(const std::string &pipeline_name, const vk_device_architecture &arch) {
    for (const auto &config : gpu_pipeline_configs) {
        if (config.arch == arch) {
            auto pipIt = config.pipelines.find(pipeline_name);
            if (pipIt != config.pipelines.end()) {
                return pipIt->second;
            }
            std::vector<std::pair<std::string, uint32_t>> sorted_pipelines(config.pipelines.begin(), config.pipelines.end());
            std::sort(sorted_pipelines.begin(), sorted_pipelines.end(),
                      [](const auto &a, const auto &b) { return a.first.size() > b.first.size(); });
            for (const auto &entry : sorted_pipelines) {
                if (pipeline_name.find(entry.first) != std::string::npos) {
                    return entry.second;
                }
            }
            return config.default_subgroup_size;
        }
    }
    return 0; // If no matching configuration is found
}
static void ggml_vk_load_shaders(vk_device& device) {
    VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")");

    // some shaders have a minimum subgroup size
    const uint32_t subgroup_size_8 = std::max(device->subgroup_size, 8u);
    const uint32_t subgroup_size_16 = std::max(device->subgroup_size, 16u);
    const uint32_t subgroup_size_32 = std::max(device->subgroup_size, 32u);

    const uint32_t mul_mat_subgroup_size = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control) ? device->subgroup_min_size : device->subgroup_size;
    const uint32_t mul_mat_subgroup_size_8 = std::max(mul_mat_subgroup_size, 8u);
    const uint32_t mul_mat_subgroup_size_16 = std::max(mul_mat_subgroup_size, 16u);
    const uint32_t mul_mat_subgroup_size_32 = std::max(mul_mat_subgroup_size, 32u);

    const bool subgroup_min_size_16 = (!device->subgroup_size_control && device->subgroup_size >= 16) ||
                                      (device->subgroup_size_control && device->subgroup_max_size >= 16);

    // mulmat
    std::vector<uint32_t> l_warptile, m_warptile, s_warptile,
                          l_warptile_id, m_warptile_id, s_warptile_id,
                          l_warptile_mmq, m_warptile_mmq, s_warptile_mmq,
                          l_warptile_mmq_int, m_warptile_mmq_int, s_warptile_mmq_int,
                          l_warptile_mmq_int_k, m_warptile_mmq_int_k, s_warptile_mmq_int_k,
                          l_warptile_mmq_k, m_warptile_mmq_k, s_warptile_mmq_k,
                          l_warptile_mmqid, m_warptile_mmqid, s_warptile_mmqid,
                          l_warptile_mmqid_int, m_warptile_mmqid_int, s_warptile_mmqid_int,
                          l_warptile_mmqid_int_k, m_warptile_mmqid_int_k, s_warptile_mmqid_int_k;
    std::array<uint32_t, 3> l_wg_denoms, m_wg_denoms, s_wg_denoms,
                            l_mmq_wg_denoms, m_mmq_wg_denoms, s_mmq_wg_denoms,
                            l_mmq_wg_denoms_k, m_mmq_wg_denoms_k, s_mmq_wg_denoms_k,
                            l_mmqid_wg_denoms, m_mmqid_wg_denoms, s_mmqid_wg_denoms;
    uint32_t l_align, m_align, s_align;
    if (device->coopmat2) {
        // spec constants and tile sizes for non-quant matmul/matmul_id
        l_warptile = { 256, 128, 256, 64, 1 };
        m_warptile = { 256, 128, 128, 64, 0 };
        s_warptile = { 128,  64,  64, 64, 0 };
        l_wg_denoms = {128, 256, 1 };
        m_wg_denoms = {128, 128, 1 };
        s_wg_denoms = { 64,  64, 1 };

        // spec constants and tile sizes for quant matmul (non-Qi_K)
        l_warptile_mmq = { 256, 128, 256, 64, 1 };
        m_warptile_mmq = { 256, 128, 128, 64, 1 };
        s_warptile_mmq = { 256,  32,  64, 128, 0 };
        l_mmq_wg_denoms = { 128, 256, 1 };
        m_mmq_wg_denoms = { 128, 128, 1 };
        s_mmq_wg_denoms = {  32,  64, 1 };

        // spec constants and tile sizes for quant matmul (Qi_K)
        l_warptile_mmq_k = { 256, 128, 256, 64, 1 };
        m_warptile_mmq_k = { 256, 128, 128, 64, 1 };
        s_warptile_mmq_k = { 256,  32,  64, 128, 0 };
        l_mmq_wg_denoms_k = { 128, 256, 1 };
        m_mmq_wg_denoms_k = { 128, 128, 1 };
        s_mmq_wg_denoms_k = {  32,  64, 1 };

        // spec constants and tile sizes for quant matmul_id
        l_warptile_mmqid = { 256, 128, 128, 16, 1, device->subgroup_size };
        m_warptile_mmqid = { 256, 128,  64, 16, 0, device->subgroup_size };
        s_warptile_mmqid = { 256, 128,  64, 16, 0, device->subgroup_size };
        l_mmqid_wg_denoms = { 128, 128, 1 };
        m_mmqid_wg_denoms = { 128,  64, 1 };
        s_mmqid_wg_denoms = { 128,  64, 1 };

        l_align = 128;
        m_align =  64;
        s_align =  32;
    } else {
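        // Non-coopmat2 warptiles: entry 0 is the workgroup size, entries 1-3 the
        // M/N/K tile dimensions, and entry 10 the subgroup size (these are the
        // entries consumed by ggml_vk_matmul_shmem_support); the remaining entries
        // are warp/thread-level tile subdivisions passed to the shaders as spec
        // constants.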
        // Matrix cores require different warp group sizes
        const uint32_t tm_l = device->coopmat_support ? device->coopmat_m : 4;
        const uint32_t tm_m = device->coopmat_support ? device->coopmat_m : 4;
        const uint32_t tm_s = device->coopmat_support ? device->coopmat_m : 2;
        const uint32_t tn_l = device->coopmat_support ? device->coopmat_n : 4;
        const uint32_t tn_m = device->coopmat_support ? device->coopmat_n : 2;
        const uint32_t tn_s = device->coopmat_support ? device->coopmat_n : 2;
        const uint32_t tk_l = device->coopmat_support ? device->coopmat_k : 1;
        const uint32_t tk_m = device->coopmat_support ? device->coopmat_k : 1;
        const uint32_t tk_s = device->coopmat_support ? device->coopmat_k : 1;

        l_warptile = { 128, 128, 128, 16, subgroup_size_8 * 2, 64, 2, tm_l, tn_l, tk_l, subgroup_size_8 };
        m_warptile = { 128,  64,  64, 16, subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
        s_warptile = { subgroup_size_16, 32, 32, 16, 32, 32, 2, tm_s, tn_s, tk_s, subgroup_size_8 };

        l_warptile_mmq = { 128, 128, 128, 32, subgroup_size_8 * 2, 64, 2, tm_l, tn_l, tk_l, subgroup_size_8 };
        m_warptile_mmq = { 128,  64,  64, 32, subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
        s_warptile_mmq = { subgroup_size_32, 32, 32, 32, 32, 32, 2, tm_s, tn_s, tk_s, subgroup_size_8 };

        // Integer MMQ has a smaller shared memory profile, but heavier register use
        l_warptile_mmq_int = { 128, 128, 128, 32, subgroup_size_8 * 2, 64, 2, 4, 4, 1, subgroup_size_8 };
        m_warptile_mmq_int = { 128,  64,  64, 32, subgroup_size_8, 32, 2, 2, 2, 1, subgroup_size_8 };
        s_warptile_mmq_int = { subgroup_size_32, 32, 32, 32, 32, 32, 2, 2, 1, 1, subgroup_size_8 };

        // K-quants use even more registers, mitigate by setting WMITER to 1
        l_warptile_mmq_int_k = { 128, 128, 128, 32, subgroup_size_8 * 2, 64, 1, 4, 4, 1, subgroup_size_8 };
        m_warptile_mmq_int_k = { 128,  64,  64, 32, subgroup_size_8, 32, 1, 2, 2, 1, subgroup_size_8 };
        s_warptile_mmq_int_k = { subgroup_size_32, 32, 32, 32, 32, 32, 1, 2, 1, 1, subgroup_size_8 };

        l_warptile_id = { 128, 128, 128, 16, mul_mat_subgroup_size_16 * 2, 64, 2, tm_l, tn_l, tk_l, mul_mat_subgroup_size_16 };
        m_warptile_id = { 128,  64,  64, 16, mul_mat_subgroup_size_16, 32, 2, tm_m, tn_m, tk_m, mul_mat_subgroup_size_16 };
        s_warptile_id = { mul_mat_subgroup_size_16, 32, 32, 16, 32, 32, 2, tm_s, tn_s, tk_s, mul_mat_subgroup_size_16 };

        l_warptile_mmqid = { 128, 128, 128, 32, mul_mat_subgroup_size_8 * 2, 64, 2, tm_l, tn_l, tk_l, mul_mat_subgroup_size_8 };
        m_warptile_mmqid = { 128,  64,  64, 32, mul_mat_subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, mul_mat_subgroup_size_8 };
        s_warptile_mmqid = { mul_mat_subgroup_size_32, 32, 32, 32, 32, 32, 2, tm_s, tn_s, tk_s, mul_mat_subgroup_size_8 };

        l_warptile_mmqid_int = { 128, 128, 128, 32, mul_mat_subgroup_size_8 * 2, 64, 2, 4, 4, 1, mul_mat_subgroup_size_8 };
        m_warptile_mmqid_int = { 128,  64,  64, 32, mul_mat_subgroup_size_8, 32, 2, 2, 2, 1, mul_mat_subgroup_size_8 };
        s_warptile_mmqid_int = { mul_mat_subgroup_size_32, 32, 32, 32, 32, 32, 2, 2, 1, 1, mul_mat_subgroup_size_8 };

        l_warptile_mmqid_int_k = { 128, 128, 128, 32, mul_mat_subgroup_size_16 * 2, 64, 1, 4, 4, 1, mul_mat_subgroup_size_16 };
        m_warptile_mmqid_int_k = { 128,  64,  64, 32, mul_mat_subgroup_size_16, 32, 1, 2, 2, 1, mul_mat_subgroup_size_16 };
        s_warptile_mmqid_int_k = { mul_mat_subgroup_size_32, 32, 32, 32, 32, 32, 1, 2, 1, 1, mul_mat_subgroup_size_16 };

        // chip specific tuning
        if ((device->architecture == AMD_GCN) && (device->driver_id != vk::DriverId::eAmdProprietary)) {
            m_warptile_mmq = m_warptile_mmq_int = { 256, 64, 64, 32, 16, 16, 2, 2, 2, 1, 16 };
            m_warptile_mmqid = m_warptile_mmqid_int = { 256, 64, 64, 32, 16, 16, 2, 2, 2, 1, 16 };
        }

        l_mmq_wg_denoms = l_wg_denoms = {128, 128, 1 };
        m_mmq_wg_denoms = m_wg_denoms = { 64,  64, 1 };
        s_mmq_wg_denoms = s_wg_denoms = { 32,  32, 1 };
        l_align = 128;
        m_align =  64;
        s_align =  32;

        for (uint32_t i = 0; i < GGML_TYPE_COUNT; ++i) {
            ggml_type t = (ggml_type)i;
            // Disable medium and large matrix multiplication if not enough shared memory is available.
            // Check mmq warptiles as the largest configuration.
            // Throw an error if not even the smallest configuration fits in shared memory.
            if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmq, false, t)) {
                std::cerr << "ggml_vulkan: Error: Shared memory size too small for matrix multiplication." << std::endl;
                throw std::runtime_error("Shared memory size too small for matrix multiplication.");
            } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmq, false, t)) {
                device->mul_mat_m[i] = false;
                device->mul_mat_l[i] = false;
            } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmq, false, t)) {
                device->mul_mat_l[i] = false;
            }

            // Disable mul_mat_id if not enough shared memory is available
            if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmqid, true, t)) {
                device->mul_mat_id_s[i] = false;
                device->mul_mat_id_m[i] = false;
                device->mul_mat_id_l[i] = false;
            } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmqid, true, t)) {
                device->mul_mat_id_m[i] = false;
                device->mul_mat_id_l[i] = false;
            } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmqid, true, t)) {
                device->mul_mat_id_l[i] = false;
            }
        }
    }
    if (!device->pipeline_matmul_f32) {
        device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_f32_f16) {
        device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_id_f32) {
        device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_bf16) {
        device->pipeline_matmul_bf16 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_id_bf16) {
        device->pipeline_matmul_id_bf16 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    std::vector<std::future<void>> compiles;
    auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const char *name, size_t spv_size, const void* spv_data, const char *entrypoint,
                                              uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
                                              uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {
        if (!require_full_subgroups && required_subgroup_size == 0) {
            required_subgroup_size = get_subgroup_size(name, device->architecture);
        }

        if (!pipeline) {
            pipeline = std::make_shared<vk_pipeline_struct>();
        }
        if (!pipeline->initialized) {
            pipeline->name = name;
            pipeline->parameter_count = parameter_count;
            pipeline->push_constant_size = push_constant_size;
            pipeline->wg_denoms = wg_denoms;
            pipeline->align = align;
            pipeline->initialized = true;
        }

        if (!pipeline->needed || pipeline->compiled) {
            return;
        }
        {
            // wait until fewer than N compiles are in progress
            uint32_t N = std::max(1u, std::thread::hardware_concurrency());
            std::unique_lock<std::mutex> guard(compile_count_mutex);
            while (compile_count >= N) {
                compile_count_cond.wait(guard);
            }
            compile_count++;
        }

        compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), spv_size, spv_data, entrypoint,
                                      parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
    };

    auto const &ggml_vk_create_pipeline2 = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const char *entrypoint,
                                               uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
                                               uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {
        return ggml_vk_create_pipeline(device, pipeline, name.c_str(), spv_size, spv_data, entrypoint,
                                       parameter_count, push_constant_size, wg_denoms, specialization_constants,
                                       align, disable_robustness, require_full_subgroups, required_subgroup_size);
    };
  2333. auto const &fa_wg_denoms = [&](FaCodePath path, uint32_t hsk, uint32_t hsv, uint32_t clamp, ggml_type type, bool small_rows) -> std::array<uint32_t, 3> {
  2334. return {fa_rows_cols(path, hsk, hsv, clamp, type, small_rows)[0], 1, 1};
  2335. };
  2336. auto const &fa_spec_constants = [&](FaCodePath path, uint32_t hsk, uint32_t hsv, uint32_t clamp, ggml_type type, bool small_rows) -> std::vector<uint32_t> {
  2337. // For large number of rows, 128 invocations seems to work best.
  2338. // For small number of rows (e.g. N==1), 256 works better. But matrix granularity for 256 is 32, so we
  2339. // can't use 256 for D==80.
  2340. // For scalar, use 128 (arbitrary)
  2341. // The same D_split value is used for both HSK and HSV, so just base it on the union of the LSBs.
  2342. const uint32_t D = (hsk|hsv);
  2343. uint32_t wg_size = (path == FA_SCALAR || path == FA_COOPMAT1)
  2344. ? scalar_flash_attention_workgroup_size
  2345. : ((small_rows && (D % 32) == 0) ? 256 : 128);
  2346. auto rows_cols = fa_rows_cols(path, hsk, hsv, clamp, type, small_rows);
  2347. // D_split can't be larger than a subgroup because we use subgroupShuffle to reduce it.
  2348. // D_split can't be larger than the LSB of D divided by 4 due to vectorization in the shader.
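        // e.g. assuming hsk == hsv == 80: D == 80 == 0b1010000, so D_lsb == 16 and
        // D_split <= min(subgroup_size, 8, 16/4), i.e. 4 on any subgroup size >= 4.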
        const uint32_t D_lsb = D ^ (D & (D-1));
        uint32_t D_split = std::min(std::min(device->subgroup_size, 8u), D_lsb / 4);

        return {wg_size, rows_cols[0], rows_cols[1], hsk, hsv, clamp, D_split};
    };
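    // CREATE_FA instantiates one flash-attention pipeline per (HSK, HSV, small_rows,
    // aligned, f32acc) entry already registered in pipeline_flash_attn_f32_f16 for the
    // given TYPE, restricted to the requested code path.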
#define CREATE_FA(TYPE, NAMELC, FAPATH, SUFFIX) \
    for (auto &fa : device->pipeline_flash_attn_f32_f16[TYPE]) { \
        uint32_t HSK = fa.first.HSK; \
        uint32_t HSV = fa.first.HSV; \
        bool small_rows = fa.first.small_rows; \
        FaCodePath path = fa.first.path; \
        bool aligned = fa.first.aligned; \
        bool f32acc = fa.first.f32acc; \
        if (path == FAPATH) { \
            if (aligned) { \
                if (f32acc) { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_align(FAPATH,HSK,HSV,TYPE,small_rows), true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } else { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_align(FAPATH,HSK,HSV,TYPE,small_rows), true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } \
            } else { \
                if (f32acc) { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows), 1, true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } else { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows), 1, true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } \
            } \
        } \
    }

    CREATE_FA(GGML_TYPE_F32, f32, FA_SCALAR, )
    CREATE_FA(GGML_TYPE_F16, f16, FA_SCALAR, )
    CREATE_FA(GGML_TYPE_Q4_0, q4_0, FA_SCALAR, )
    CREATE_FA(GGML_TYPE_Q8_0, q8_0, FA_SCALAR, )
#if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    if (device->coopmat1_fa_support) {
        CREATE_FA(GGML_TYPE_F32, f32, FA_COOPMAT1, _cm1)
        CREATE_FA(GGML_TYPE_F16, f16, FA_COOPMAT1, _cm1)
        CREATE_FA(GGML_TYPE_Q4_0, q4_0, FA_COOPMAT1, _cm1)
        CREATE_FA(GGML_TYPE_Q8_0, q8_0, FA_COOPMAT1, _cm1)
    }
#endif
#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
    if (device->coopmat2) {
        CREATE_FA(GGML_TYPE_F32, f32, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_F16, f16, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q4_0, q4_0, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q4_1, q4_1, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q5_0, q5_0, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q5_1, q5_1, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q8_0, q8_0, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl, FA_COOPMAT2, _cm2)
    }
#endif
#undef CREATE_FA

#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
    if (device->coopmat2) {
        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \

        // Create 2 variants, {f16,f32} accumulator
#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
        CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
        CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \

        CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3)
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3)
        }
#endif
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_0], matmul_q4_0_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_1], matmul_q4_1_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_0], matmul_q5_0_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_1], matmul_q5_1_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q8_0], matmul_q8_0_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q2_K], matmul_q2_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q3_K], matmul_q3_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_K], matmul_q4_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_K], matmul_q5_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q6_K], matmul_q6_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ1_S], matmul_iq1_s_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ1_M], matmul_iq1_m_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_XXS], matmul_iq2_xxs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_XS], matmul_iq2_xs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_S], matmul_iq2_s_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_XXS], matmul_iq3_xxs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_S], matmul_iq3_s_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_XS], matmul_iq4_xs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL], matmul_iq4_nl_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_MXFP4], matmul_mxfp4_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)

        GGML_ASSERT(device->subgroup_ballot);
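        // The "_subgroup" matmul_id variants rely on subgroup ballot operations, hence
        // the assert above.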
        CREATE_MM2(pipeline_matmul_id_f16, matmul_id_subgroup_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
        }
#endif
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_subgroup_iq1_s_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_subgroup_iq1_m_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_subgroup_iq2_xxs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_subgroup_iq2_xs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_subgroup_iq2_s_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_subgroup_iq3_xxs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_subgroup_iq3_s_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_subgroup_iq4_xs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_subgroup_iq4_nl_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
#undef CREATE_MM
#undef CREATE_MM2
    } else
#endif  // defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
#if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    if (device->coopmat_support) {
        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(TYPE, PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm1_len, NAMELC ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, true); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _cm1_len, NAMELC ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, true); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _cm1_len, NAMELC ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, true); \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _cm1_len, NAMELC ## _aligned ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, true); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _cm1_len, NAMELC ## _aligned ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, true); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _cm1_len, NAMELC ## _aligned ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, true); \

        // Create 2 variants, {f16,f32} accumulator
#define CREATE_MM2(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        if (device->coopmat_acc_f16_support) { \
            CREATE_MM(TYPE, PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        } \
        if (device->coopmat_acc_f32_support) { \
            CREATE_MM(TYPE, PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        } \

        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, )
        }
#endif
        if (device->coopmat_acc_f16_support) {
            CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0], matmul_q4_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1], matmul_q4_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0], matmul_q5_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1], matmul_q5_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0], matmul_q8_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K], matmul_q2_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K], matmul_q3_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K], matmul_q4_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K], matmul_q5_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K], matmul_q6_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S], matmul_iq1_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M], matmul_iq1_m_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS], matmul_iq2_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS], matmul_iq2_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S], matmul_iq2_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS], matmul_iq3_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S], matmul_iq3_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS], matmul_iq4_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL], matmul_iq4_nl_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4], matmul_mxfp4_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
        } else {
            CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S].f32acc, matmul_iq1_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M].f32acc, matmul_iq1_m_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f32acc, matmul_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f32acc, matmul_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f32acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f32acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f32acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f32acc, matmul_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4].f32acc, matmul_mxfp4_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
        }

        GGML_ASSERT(device->subgroup_ballot);
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_subgroup_f32_f32, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16, matmul_id_subgroup_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16_f32, matmul_id_subgroup_f16_f32, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id);
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id);
        }
#endif
        CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_subgroup_iq1_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_subgroup_iq1_m_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_subgroup_iq2_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_subgroup_iq2_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_subgroup_iq2_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_subgroup_iq3_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_subgroup_iq3_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_subgroup_iq4_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_subgroup_iq4_nl_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
#undef CREATE_MM2
#undef CREATE_MM
    } else
#endif  // defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
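    // No cooperative matrix support: fall back to the plain shader variants, preferring
    // fp16 arithmetic when the device supports it.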
    if (device->fp16) {
        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(TYPE, PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \

#define CREATE_MMQ(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        if (device->mul_mat ## ID ## _l[TYPE]) { \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME .f32acc->l, #NAMELC "_l", NAMELC ## _len, NAMELC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        } \
        if (device->mul_mat ## ID ## _m[TYPE]) { \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME .f32acc->m, #NAMELC "_m", NAMELC ## _len, NAMELC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        } \
        if (device->mul_mat ## ID ## _s[TYPE]) { \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME .f32acc->s, #NAMELC "_s", NAMELC ## _len, NAMELC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        } \

        // Create 2 variants, {f16,f32} accumulator
#define CREATE_MM2(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        CREATE_MM(TYPE, PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        CREATE_MM(TYPE, PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \

        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0], matmul_q4_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1], matmul_q4_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0], matmul_q5_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1], matmul_q5_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0], matmul_q8_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K], matmul_q2_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K], matmul_q3_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K], matmul_q4_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K], matmul_q5_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K], matmul_q6_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S], matmul_iq1_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M], matmul_iq1_m_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS], matmul_iq2_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS], matmul_iq2_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S], matmul_iq2_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS], matmul_iq3_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S], matmul_iq3_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS], matmul_iq4_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL], matmul_iq4_nl_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4], matmul_mxfp4_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
        if (device->integer_dot_product) {
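            // The *_q8_1 MMQ pipelines multiply quantized weights against activations
            // quantized to q8_1 and accumulate with integer dot products; note that
            // CREATE_MMQ only creates the f32-accumulator variant.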
            CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_0], matmul_q4_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_1], matmul_q4_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_0], matmul_q5_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_1], matmul_q5_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q8_0], matmul_q8_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_MXFP4], matmul_mxfp4_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q2_K], matmul_q2_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q3_K], matmul_q3_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_K], matmul_q4_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_K], matmul_q5_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q6_K], matmul_q6_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
        }
#endif
        if (device->subgroup_ballot && device->subgroup_require_full_support && subgroup_min_size_16) {
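            // Subgroup-optimized matmul_id variants: these require full subgroups and pin
            // the subgroup size (mul_mat_subgroup_size / mul_mat_subgroup_size_16).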
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_subgroup_f32_f32, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16, matmul_id_subgroup_f16, wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16_f32, matmul_id_subgroup_f16_f32, wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_subgroup_iq1_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_subgroup_iq1_m_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_subgroup_iq2_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_subgroup_iq2_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_subgroup_iq2_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_subgroup_iq3_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_subgroup_iq3_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_subgroup_iq4_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_subgroup_iq4_nl_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            if (device->integer_dot_product) {
                CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            }
#endif
        } else {
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_q4_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_q4_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_q5_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_q5_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_q8_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_q2_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_q3_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_q4_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_q5_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_q6_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_iq1_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_iq1_m_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_iq2_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_iq2_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_iq2_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_iq3_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_iq3_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_iq4_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_iq4_nl_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_mxfp4_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            if (device->integer_dot_product) {
                CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_0], matmul_id_q4_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_1], matmul_id_q4_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_0], matmul_id_q5_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_1], matmul_id_q5_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q8_0], matmul_id_q8_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_MXFP4], matmul_id_mxfp4_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q2_K], matmul_id_q2_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q3_K], matmul_id_q3_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_K], matmul_id_q4_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_K], matmul_id_q5_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q6_K], matmul_id_q6_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
            }
#endif
        }
#undef CREATE_MM2
#undef CREATE_MMQ
#undef CREATE_MM
    } else {
        // Create 6 variants, {s,m,l}x{unaligned,aligned}
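        // fp32 fallback: the _fp32 shader variants keep all arithmetic in fp32; only the
        // f32-accumulator pipelines are populated here.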
  2737. #define CREATE_MM(TYPE, PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
  2738. if (device->mul_mat ## ID ## _l[TYPE]) \
  2739. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
  2740. if (device->mul_mat ## ID ## _m[TYPE]) \
  2741. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
  2742. if (device->mul_mat ## ID ## _s[TYPE]) \
  2743. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
  2744. if (device->mul_mat ## ID ## _l[TYPE]) \
  2745. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
  2746. if (device->mul_mat ## ID ## _m[TYPE]) \
  2747. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
  2748. if (device->mul_mat ## ID ## _s[TYPE]) \
  2749. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
  2750. #define CREATE_MMQ(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
  2751. if (device->mul_mat ## ID ## _l[TYPE]) \
  2752. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC "_l", NAMELC ## _fp32_len, NAMELC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
  2753. if (device->mul_mat ## ID ## _m[TYPE]) \
  2754. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC "_m", NAMELC ## _fp32_len, NAMELC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
  2755. if (device->mul_mat ## ID ## _s[TYPE]) \
  2756. ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC "_s", NAMELC ## _fp32_len, NAMELC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F16, pipeline_matmul_f16.f32acc, matmul_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F16, pipeline_matmul_f16_f32.f32acc, matmul_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);

        CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S].f32acc, matmul_iq1_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M].f32acc, matmul_iq1_m_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f32acc, matmul_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f32acc, matmul_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f32acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f32acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f32acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f32acc, matmul_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4].f32acc, matmul_mxfp4_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
        if (device->integer_dot_product) {
            CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );

            CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
        }
#endif
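        // When subgroup ballot with guaranteed full subgroups is available and the minimum
        // subgroup width is at least 16, build the subgroup-optimized matmul_id shader
        // variants below; otherwise fall back to the plain shared-memory variants.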
        if (device->subgroup_ballot && device->subgroup_require_full_support && subgroup_min_size_16) {
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_subgroup_f32_f32, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16.f32acc, matmul_id_subgroup_f16, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16_f32.f32acc, matmul_id_subgroup_f16_f32, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);

            CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f32acc, matmul_id_subgroup_q4_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f32acc, matmul_id_subgroup_q4_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f32acc, matmul_id_subgroup_q5_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f32acc, matmul_id_subgroup_q5_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f32acc, matmul_id_subgroup_q8_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f32acc, matmul_id_subgroup_q2_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f32acc, matmul_id_subgroup_q3_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_subgroup_q4_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_subgroup_q5_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_subgroup_q6_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S].f32acc, matmul_id_subgroup_iq1_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M].f32acc, matmul_id_subgroup_iq1_m_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f32acc, matmul_id_subgroup_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f32acc, matmul_id_subgroup_iq2_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f32acc, matmul_id_subgroup_iq2_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f32acc, matmul_id_subgroup_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f32acc, matmul_id_subgroup_iq3_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f32acc, matmul_id_subgroup_iq4_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_subgroup_iq4_nl_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4].f32acc, matmul_id_subgroup_mxfp4_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
        } else {
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16.f32acc, matmul_id_f16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16_f32.f32acc, matmul_id_f16_f32, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);

            CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f32acc, matmul_id_q4_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f32acc, matmul_id_q4_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f32acc, matmul_id_q5_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f32acc, matmul_id_q5_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f32acc, matmul_id_q8_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f32acc, matmul_id_q2_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f32acc, matmul_id_q3_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_q4_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_q5_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_q6_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S].f32acc, matmul_id_iq1_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M].f32acc, matmul_id_iq1_m_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f32acc, matmul_id_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f32acc, matmul_id_iq2_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f32acc, matmul_id_iq2_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f32acc, matmul_id_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f32acc, matmul_id_iq3_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f32acc, matmul_id_iq4_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4].f32acc, matmul_id_mxfp4_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
        }
    }
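    // If a cooperative-matrix matmul path is in use but bf16 cooperative matrices are
    // not supported, register scalar bf16 pipelines as a fallback, using the smaller
    // scalar tile sizes set below.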
    // reusing CREATE_MM from the fp32 path
    if ((device->coopmat2 || device->coopmat_support)
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        && !device->coopmat_bf16_support
#endif
    ) {
        // use scalar tile sizes
        l_warptile = { 128, 128, 128, 16, subgroup_size_8 * 2, 64, 2, 4, 4, 1, subgroup_size_8 };
        m_warptile = { 128,  64,  64, 16, subgroup_size_8,     32, 2, 4, 2, 1, subgroup_size_8 };
        s_warptile = { subgroup_size_16, 32, 32, 16, 32, 32, 2, 2, 2, 1, subgroup_size_8 };

        l_wg_denoms = {128, 128, 1 };
        m_wg_denoms = { 64,  64, 1 };
        s_wg_denoms = { 32,  32, 1 };

        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
    }
#undef CREATE_MM
    // mul mat vec
    // the number of rows computed per shader depends on GPU model and quant
    uint32_t rm_stdq = 1;
    uint32_t rm_kq = 2;
    if (device->vendor_id == VK_VENDOR_ID_AMD) {
        if (device->architecture == vk_device_architecture::AMD_GCN) {
            rm_stdq = 2;
            rm_kq = 4;
        }
    } else if (device->vendor_id == VK_VENDOR_ID_INTEL) {
        rm_stdq = 2;
    }
    uint32_t rm_iq = 2 * rm_kq;
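    // Worked example: on AMD GCN, rm_stdq = 2 and rm_kq = 4, so rm_iq = 8. The q4_0
    // pipelines below are then specialized for 2*rm_stdq = 4 rows per workgroup, the
    // k-quants for 4 rows, and the iq quants for 8 rows.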
    const bool use_subgroups = device->subgroup_arithmetic && device->architecture != vk_device_architecture::AMD_GCN;
    // Ensure a subgroup size >= 16 is available
    const bool use_subgroups16 = use_subgroups && subgroup_min_size_16;

    const uint32_t subgroup_size   = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control && device->subgroup_min_size <= 16 && device->subgroup_max_size >= 16) ? 16 : device->subgroup_size;
    const uint32_t subgroup_size16 = std::max(subgroup_size, 16u);

    const uint32_t force_subgroup_size   = use_subgroups   ? subgroup_size   : 0;
    const uint32_t force_subgroup_size16 = use_subgroups16 ? subgroup_size16 : 0;
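    // On Intel, prefer a 16-wide subgroup when the driver supports subgroup size control;
    // a force value of 0 leaves the subgroup size to the driver when subgroup reductions
    // are not in use.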
    for (uint32_t w = 0; w < DMMV_WG_SIZE_COUNT; ++w) {
        const uint32_t wg_size_subgroup   = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size   : (subgroup_size   * 4);
        const uint32_t wg_size_subgroup16 = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size16 : (subgroup_size16 * 4);

        const shader_reduction_mode reduc = (use_subgroups && w == DMMV_WG_SIZE_SUBGROUP) ? SHADER_REDUCTION_MODE_SUBGROUP :
                                            (use_subgroups && w == DMMV_WG_SIZE_LARGE)    ? SHADER_REDUCTION_MODE_HYBRID :
                                                                                            SHADER_REDUCTION_MODE_SHMEM;

        const shader_reduction_mode reduc16 = (use_subgroups16 && w == DMMV_WG_SIZE_SUBGROUP) ? SHADER_REDUCTION_MODE_SUBGROUP :
                                              (use_subgroups16 && w == DMMV_WG_SIZE_LARGE)    ? SHADER_REDUCTION_MODE_HYBRID :
                                                                                                SHADER_REDUCTION_MODE_SHMEM;
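        // Reduction mode per workgroup-size variant: SUBGROUP runs one subgroup per
        // workgroup with pure subgroup reductions, HYBRID runs four subgroups combining
        // subgroup and shared-memory reduction, and SHMEM is the shared-memory-only
        // fallback when subgroup arithmetic is unavailable.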
        for (uint32_t i = 0; i < mul_mat_vec_max_cols; ++i) {
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32", arr_dmmv_f32_f32_f32_len[reduc], arr_dmmv_f32_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32", arr_dmmv_f16_f32_f32_len[reduc], arr_dmmv_f16_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f32_f32", arr_dmmv_bf16_f32_f32_len[reduc], arr_dmmv_bf16_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32", arr_dmmv_q4_0_f32_f32_len[reduc], arr_dmmv_q4_0_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32", arr_dmmv_q4_1_f32_f32_len[reduc], arr_dmmv_q4_1_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32", arr_dmmv_q5_0_f32_f32_len[reduc], arr_dmmv_q5_0_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32", arr_dmmv_q5_1_f32_f32_len[reduc], arr_dmmv_q5_1_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32", arr_dmmv_q8_0_f32_f32_len[reduc], arr_dmmv_q8_0_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32", arr_dmmv_q2_k_f32_f32_len[reduc16], arr_dmmv_q2_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32", arr_dmmv_q3_k_f32_f32_len[reduc16], arr_dmmv_q3_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32", arr_dmmv_q4_k_f32_f32_len[reduc16], arr_dmmv_q4_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32", arr_dmmv_q5_k_f32_f32_len[reduc16], arr_dmmv_q5_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32", arr_dmmv_q6_k_f32_f32_len[reduc16], arr_dmmv_q6_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f32_f32", arr_dmmv_iq1_s_f32_f32_len[reduc16], arr_dmmv_iq1_s_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f32_f32", arr_dmmv_iq1_m_f32_f32_len[reduc16], arr_dmmv_iq1_m_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f32_f32", arr_dmmv_iq2_xxs_f32_f32_len[reduc16], arr_dmmv_iq2_xxs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f32_f32", arr_dmmv_iq2_xs_f32_f32_len[reduc16], arr_dmmv_iq2_xs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f32_f32", arr_dmmv_iq2_s_f32_f32_len[reduc16], arr_dmmv_iq2_s_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f32_f32", arr_dmmv_iq3_xxs_f32_f32_len[reduc16], arr_dmmv_iq3_xxs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f32_f32", arr_dmmv_iq3_s_f32_f32_len[reduc16], arr_dmmv_iq3_s_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f32_f32", arr_dmmv_iq4_xs_f32_f32_len[reduc16], arr_dmmv_iq4_xs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32", arr_dmmv_iq4_nl_f32_f32_len[reduc16], arr_dmmv_iq4_nl_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f32_f32", arr_dmmv_mxfp4_f32_f32_len[reduc16], arr_dmmv_mxfp4_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);

            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32", arr_dmmv_f32_f16_f32_len[reduc], arr_dmmv_f32_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32", arr_dmmv_f16_f16_f32_len[reduc], arr_dmmv_f16_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f16_f32", arr_dmmv_bf16_f16_f32_len[reduc], arr_dmmv_bf16_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32", arr_dmmv_q4_0_f16_f32_len[reduc], arr_dmmv_q4_0_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32", arr_dmmv_q4_1_f16_f32_len[reduc], arr_dmmv_q4_1_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32", arr_dmmv_q5_0_f16_f32_len[reduc], arr_dmmv_q5_0_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32", arr_dmmv_q5_1_f16_f32_len[reduc], arr_dmmv_q5_1_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32", arr_dmmv_q8_0_f16_f32_len[reduc], arr_dmmv_q8_0_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32", arr_dmmv_q2_k_f16_f32_len[reduc16], arr_dmmv_q2_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32", arr_dmmv_q3_k_f16_f32_len[reduc16], arr_dmmv_q3_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32", arr_dmmv_q4_k_f16_f32_len[reduc16], arr_dmmv_q4_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32", arr_dmmv_q5_k_f16_f32_len[reduc16], arr_dmmv_q5_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32", arr_dmmv_q6_k_f16_f32_len[reduc16], arr_dmmv_q6_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f16_f32", arr_dmmv_iq1_s_f16_f32_len[reduc16], arr_dmmv_iq1_s_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f16_f32", arr_dmmv_iq1_m_f16_f32_len[reduc16], arr_dmmv_iq1_m_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f16_f32", arr_dmmv_iq2_xxs_f16_f32_len[reduc16], arr_dmmv_iq2_xxs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f16_f32", arr_dmmv_iq2_xs_f16_f32_len[reduc16], arr_dmmv_iq2_xs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f16_f32", arr_dmmv_iq2_s_f16_f32_len[reduc16], arr_dmmv_iq2_s_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f16_f32", arr_dmmv_iq3_xxs_f16_f32_len[reduc16], arr_dmmv_iq3_xxs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f16_f32", arr_dmmv_iq3_s_f16_f32_len[reduc16], arr_dmmv_iq3_s_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f16_f32", arr_dmmv_iq4_xs_f16_f32_len[reduc16], arr_dmmv_iq4_xs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32", arr_dmmv_iq4_nl_f16_f32_len[reduc16], arr_dmmv_iq4_nl_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f16_f32", arr_dmmv_mxfp4_f16_f32_len[reduc16], arr_dmmv_mxfp4_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            if (device->integer_dot_product) {
                const uint32_t subgroup_size_int = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control) ? device->subgroup_min_size : device->subgroup_size;
                const uint32_t wg_size_subgroup_int = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size_int : (subgroup_size_int * 4);

                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_q8_1_f32", arr_dmmv_q4_0_q8_1_f32_len[reduc], arr_dmmv_q4_0_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_q8_1_f32", arr_dmmv_q4_1_q8_1_f32_len[reduc], arr_dmmv_q4_1_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_q8_1_f32", arr_dmmv_q5_0_q8_1_f32_len[reduc], arr_dmmv_q5_0_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_q8_1_f32", arr_dmmv_q5_1_q8_1_f32_len[reduc], arr_dmmv_q5_1_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_q8_1_f32", arr_dmmv_q8_0_q8_1_f32_len[reduc], arr_dmmv_q8_0_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup_int, 1*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
            }
#endif // GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT
        }
    }
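    // Unlike the loop above, the mul_mat_vec_id pipelines are created once, without the
    // workgroup-size and column-count specializations, using the plain (shared-memory)
    // shader builds.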
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_BF16], "mul_mat_vec_id_bf16_f32", mul_mat_vec_id_bf16_f32_len, mul_mat_vec_id_bf16_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_S], "mul_mat_vec_id_iq1_s_f32", mul_mat_vec_id_iq1_s_f32_len, mul_mat_vec_id_iq1_s_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_M], "mul_mat_vec_id_iq1_m_f32", mul_mat_vec_id_iq1_m_f32_len, mul_mat_vec_id_iq1_m_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XXS], "mul_mat_vec_id_iq2_xxs_f32", mul_mat_vec_id_iq2_xxs_f32_len, mul_mat_vec_id_iq2_xxs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XS], "mul_mat_vec_id_iq2_xs_f32", mul_mat_vec_id_iq2_xs_f32_len, mul_mat_vec_id_iq2_xs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_S], "mul_mat_vec_id_iq2_s_f32", mul_mat_vec_id_iq2_s_f32_len, mul_mat_vec_id_iq2_s_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_XXS], "mul_mat_vec_id_iq3_xxs_f32", mul_mat_vec_id_iq3_xxs_f32_len, mul_mat_vec_id_iq3_xxs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_S], "mul_mat_vec_id_iq3_s_f32", mul_mat_vec_id_iq3_s_f32_len, mul_mat_vec_id_iq3_s_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_XS], "mul_mat_vec_id_iq4_xs_f32", mul_mat_vec_id_iq4_xs_f32_len, mul_mat_vec_id_iq4_xs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_MXFP4], "mul_mat_vec_id_mxfp4_f32", mul_mat_vec_id_mxfp4_f32_len, mul_mat_vec_id_mxfp4_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    // dequant shaders
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_k", dequant_q2_k_len, dequant_q2_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_k", dequant_q3_k_len, dequant_q3_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ1_S], "dequant_iq1_s", dequant_iq1_s_len, dequant_iq1_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ1_M], "dequant_iq1_m", dequant_iq1_m_len, dequant_iq1_m_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_XXS], "dequant_iq2_xxs", dequant_iq2_xxs_len, dequant_iq2_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_XS], "dequant_iq2_xs", dequant_iq2_xs_len, dequant_iq2_xs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_S], "dequant_iq2_s", dequant_iq2_s_len, dequant_iq2_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_XXS], "dequant_iq3_xxs", dequant_iq3_xxs_len, dequant_iq3_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_S], "dequant_iq3_s", dequant_iq3_s_len, dequant_iq3_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_XS], "dequant_iq4_xs", dequant_iq4_xs_len, dequant_iq4_xs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_MXFP4], "dequant_mxfp4", dequant_mxfp4_len, dequant_mxfp4_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
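    // Note on the table above: the GGML_TYPE_F32 entry ("f32_to_f16") is an f32 -> f16
    // conversion shader rather than a dequantization; it is simply registered in the
    // same pipeline_dequant table.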
    // get_rows
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_BF16], "get_rows_bf16", get_rows_bf16_len, get_rows_bf16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q2_K], "get_rows_q2_k", get_rows_q2_k_len, get_rows_q2_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q3_K], "get_rows_q3_k", get_rows_q3_k_len, get_rows_q3_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_K], "get_rows_q4_k", get_rows_q4_k_len, get_rows_q4_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_K], "get_rows_q5_k", get_rows_q5_k_len, get_rows_q5_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q6_K], "get_rows_q6_k", get_rows_q6_k_len, get_rows_q6_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ1_S], "get_rows_iq1_s", get_rows_iq1_s_len, get_rows_iq1_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ1_M], "get_rows_iq1_m", get_rows_iq1_m_len, get_rows_iq1_m_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_XXS], "get_rows_iq2_xxs", get_rows_iq2_xxs_len, get_rows_iq2_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_XS], "get_rows_iq2_xs", get_rows_iq2_xs_len, get_rows_iq2_xs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_S], "get_rows_iq2_s", get_rows_iq2_s_len, get_rows_iq2_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs", get_rows_iq3_xxs_len, get_rows_iq3_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_S], "get_rows_iq3_s", get_rows_iq3_s_len, get_rows_iq3_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_XS], "get_rows_iq4_xs", get_rows_iq4_xs_len, get_rows_iq4_xs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_MXFP4], "get_rows_mxfp4", get_rows_mxfp4_len, get_rows_mxfp4_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_BF16], "get_rows_bf16_f32", get_rows_bf16_f32_len, get_rows_bf16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q2_K], "get_rows_q2_k_f32", get_rows_q2_k_f32_len, get_rows_q2_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q3_K], "get_rows_q3_k_f32", get_rows_q3_k_f32_len, get_rows_q3_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_K], "get_rows_q4_k_f32", get_rows_q4_k_f32_len, get_rows_q4_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_K], "get_rows_q5_k_f32", get_rows_q5_k_f32_len, get_rows_q5_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q6_K], "get_rows_q6_k_f32", get_rows_q6_k_f32_len, get_rows_q6_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ1_S], "get_rows_iq1_s_f32", get_rows_iq1_s_f32_len, get_rows_iq1_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ1_M], "get_rows_iq1_m_f32", get_rows_iq1_m_f32_len, get_rows_iq1_m_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_XXS], "get_rows_iq2_xxs_f32", get_rows_iq2_xxs_f32_len, get_rows_iq2_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_XS], "get_rows_iq2_xs_f32", get_rows_iq2_xs_f32_len, get_rows_iq2_xs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_S], "get_rows_iq2_s_f32", get_rows_iq2_s_f32_len, get_rows_iq2_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs_f32", get_rows_iq3_xxs_f32_len, get_rows_iq3_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_S], "get_rows_iq3_s_f32", get_rows_iq3_s_f32_len, get_rows_iq3_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_XS], "get_rows_iq4_xs_f32", get_rows_iq4_xs_f32_len, get_rows_iq4_xs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_MXFP4], "get_rows_mxfp4_f32", get_rows_mxfp4_f32_len, get_rows_mxfp4_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3045. ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1);
  3046. ggml_vk_create_pipeline(device, device->pipeline_flash_attn_split_k_reduce, "fa_split_k_reduce", fa_split_k_reduce_len, fa_split_k_reduce_data, "main", 3, 5 * sizeof(uint32_t), {1, device->subgroup_size, 1}, {device->subgroup_size}, 1, true);
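    // q8_1 quantization: prefer the subgroup-clustered shader variants when the
    // device can guarantee full subgroups; otherwise use the plain variants.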
    if (device->subgroup_clustered && device->subgroup_require_full_support) {
        ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1, "quantize_q8_1", quantize_q8_1_subgroup_len, quantize_q8_1_subgroup_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1, true, true);
        ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1_x4, "quantize_q8_1_x4", quantize_q8_1_x4_subgroup_len, quantize_q8_1_x4_subgroup_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1, true, true);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1, "quantize_q8_1", quantize_q8_1_len, quantize_q8_1_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1);
        ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1_x4, "quantize_q8_1_x4", quantize_q8_1_x4_len, quantize_q8_1_x4_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1);
    }
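    // One p021 mat-vec pipeline per GQA ratio (1..p021_max_gqa_ratio). The
    // subgroup-add shader variant is used when subgroup arithmetic is available
    // with full-subgroup support.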
    for (uint32_t i = 0; i < p021_max_gqa_ratio; ++i) {
        if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
            ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_subgroup_add_len, mul_mat_vec_p021_f16_f32_subgroup_add_data, "main", 4, 7 * sizeof(uint32_t), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true, true);
        } else {
            ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 4, 7 * sizeof(uint32_t), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true);
        }
    }
    ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 4, 13 * sizeof(uint32_t), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 0}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_f32, "rms_norm_mul_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 1}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_partials_f32, "rms_norm_partials_f32", rms_norm_partials_f32_len, rms_norm_partials_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 0}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_partials_f32, "rms_norm_mul_partials_f32", rms_norm_partials_f32_len, rms_norm_partials_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 1}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_back_f32, "rms_norm_back_f32", rms_norm_back_f32_len, rms_norm_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_l2_norm_f32, "l2_norm_f32", l2_norm_f32_len, l2_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f32, "cpy_f16_f32", cpy_f16_f32_len, cpy_f16_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_bf16, "cpy_f32_bf16", cpy_f32_bf16_len, cpy_f32_bf16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_i32_f32, "cpy_i32_f32", cpy_i32_f32_len, cpy_i32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_i32, "cpy_f32_i32", cpy_f32_i32_len, cpy_f32_i32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f32, "contig_cpy_f32_f32", contig_cpy_f32_f32_len, contig_cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f16, "contig_cpy_f32_f16", contig_cpy_f32_f16_len, contig_cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f16, "contig_cpy_f16_f16", contig_cpy_f16_f16_len, contig_cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f32, "contig_cpy_f16_f32", contig_cpy_f16_f32_len, contig_cpy_f16_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_bf16, "contig_cpy_f32_bf16", contig_cpy_f32_bf16_len, contig_cpy_f32_bf16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_i32_f32, "contig_cpy_i32_f32", contig_cpy_i32_f32_len, contig_cpy_i32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_i32, "contig_cpy_f32_i32", contig_cpy_f32_i32_len, contig_cpy_f32_i32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
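    // f32 -> quantized copies. The _rte shader variants force round-to-nearest-even
    // fp16 conversions and are used when the device advertises RTE float controls.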
    if (device->float_controls_rte_fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_0], "cpy_f32_q4_0", cpy_f32_q4_0_rte_len, cpy_f32_q4_0_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_1], "cpy_f32_q4_1", cpy_f32_q4_1_rte_len, cpy_f32_q4_1_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_0], "cpy_f32_q5_0", cpy_f32_q5_0_rte_len, cpy_f32_q5_0_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_1], "cpy_f32_q5_1", cpy_f32_q5_1_rte_len, cpy_f32_q5_1_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q8_0], "cpy_f32_q8_0", cpy_f32_q8_0_rte_len, cpy_f32_q8_0_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_IQ4_NL], "cpy_f32_iq4_nl", cpy_f32_iq4_nl_rte_len, cpy_f32_iq4_nl_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_0], "cpy_f32_q4_0", cpy_f32_q4_0_len, cpy_f32_q4_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_1], "cpy_f32_q4_1", cpy_f32_q4_1_len, cpy_f32_q4_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_0], "cpy_f32_q5_0", cpy_f32_q5_0_len, cpy_f32_q5_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_1], "cpy_f32_q5_1", cpy_f32_q5_1_len, cpy_f32_q5_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q8_0], "cpy_f32_q8_0", cpy_f32_q8_0_len, cpy_f32_q8_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_IQ4_NL], "cpy_f32_iq4_nl", cpy_f32_iq4_nl_len, cpy_f32_iq4_nl_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
    }
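// SET_ROWS instantiates the set_rows pipelines for one index type (i32 or i64)
// and each supported destination type, optionally selecting the _rte shader
// variants (see the float_controls_rte_fp16 check below).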
#define SET_ROWS(itype, rte) \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_F32], "set_rows_f32" #itype, set_rows_f32 ## itype ## rte ## _len, set_rows_f32 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_F16], "set_rows_f16" #itype, set_rows_f16 ## itype ## rte ## _len, set_rows_f16 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_BF16], "set_rows_bf16" #itype, set_rows_bf16 ## itype ## rte ## _len, set_rows_bf16 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q4_0], "set_rows_q4_0" #itype, set_rows_q4_0 ## itype ## rte ## _len, set_rows_q4_0 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q4_1], "set_rows_q4_1" #itype, set_rows_q4_1 ## itype ## rte ## _len, set_rows_q4_1 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q5_0], "set_rows_q5_0" #itype, set_rows_q5_0 ## itype ## rte ## _len, set_rows_q5_0 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q5_1], "set_rows_q5_1" #itype, set_rows_q5_1 ## itype ## rte ## _len, set_rows_q5_1 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q8_0], "set_rows_q8_0" #itype, set_rows_q8_0 ## itype ## rte ## _len, set_rows_q8_0 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_IQ4_NL], "set_rows_iq4_nl" #itype, set_rows_iq4_nl ## itype ## rte ## _len, set_rows_iq4_nl ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true);

    if (device->float_controls_rte_fp16) {
        SET_ROWS(_i32, _rte)
        SET_ROWS(_i64, _rte)
    } else {
        SET_ROWS(_i32, )
        SET_ROWS(_i64, )
    }
#undef SET_ROWS

    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_0], "cpy_q4_0_f32", cpy_q4_0_f32_len, cpy_q4_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_0), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_1], "cpy_q4_1_f32", cpy_q4_1_f32_len, cpy_q4_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_1), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_0], "cpy_q5_0_f32", cpy_q5_0_f32_len, cpy_q5_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_0), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_1], "cpy_q5_1_f32", cpy_q5_1_f32_len, cpy_q5_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_1), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q8_0], "cpy_q8_0_f32", cpy_q8_0_f32_len, cpy_q8_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q8_0), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_IQ4_NL], "cpy_iq4_nl_f32", cpy_iq4_nl_f32_len, cpy_iq4_nl_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_IQ4_NL), 1, 1}, {}, 1);

    auto get_suffix = [](bool src0_f16, bool src1_f16, bool dst_f16) {
        std::string s;
        s += std::string(src0_f16 ? "_f16" : "_f32");
        s += std::string(src1_f16 ? "_f16" : "_f32");
        s += std::string(dst_f16 ? "_f16" : "_f32");
        return s;
    };

    bool rte = device->float_controls_rte_fp16;
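// CREATE_BINARY instantiates all eight f32/f16 combinations of src0/src1/dst for
// a binary op, indexing the shader table by [s0][s1][d][rte]. The _norepeat
// variants are specialized (spec constant 1) for operands that need no
// repeat-broadcasting.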
#define CREATE_BINARY(name, namemod, spec, bindings) \
    for (int s0 : {0,1}) for (int s1 : {0,1}) for (int d : {0,1}) \
        ggml_vk_create_pipeline2(device, device->pipeline_ ## name ## namemod[s0][s1][d], \
            #name + get_suffix(s0, s1, d) + #namemod, name ## _len[s0][s1][d][rte], name ## _data[s0][s1][d][rte], \
            "main", (bindings), sizeof(vk_op_binary_push_constants), {512, 1, 1}, spec, 1);

    CREATE_BINARY(add, , {0}, 4)
    CREATE_BINARY(add, _norepeat, {1}, 4)
    CREATE_BINARY(sub, , {0}, 3)
    CREATE_BINARY(sub, _norepeat, {1}, 3)
    CREATE_BINARY(mul, , {0}, 3)
    CREATE_BINARY(mul, _norepeat, {1}, 3)
    CREATE_BINARY(div, , {0}, 3)
    CREATE_BINARY(div, _norepeat, {1}, 3)
    CREATE_BINARY(add_rms, , {0}, 4)
    CREATE_BINARY(add_rms, _norepeat, {1}, 4)
#undef CREATE_BINARY

    if (device->multi_add) {
        for (uint32_t i = 0; i < MAX_FUSED_ADDS; ++i) {
            ggml_vk_create_pipeline2(device, device->pipeline_multi_add[i], "multi_add_f32_" + std::to_string(i+1), multi_add_f32_len, multi_add_f32_data, "main", MAX_PARAMETER_COUNT, sizeof(vk_op_multi_add_push_constants), {512, 1, 1}, {i+2}, 1);
            ggml_vk_create_pipeline2(device, device->pipeline_multi_add_rms[i], "multi_add_rms_f32_" + std::to_string(i+1), multi_add_rms_f32_len, multi_add_rms_f32_data, "main", MAX_PARAMETER_COUNT, sizeof(vk_op_multi_add_push_constants), {512, 1, 1}, {i+2}, 1);
        }
    }

    ggml_vk_create_pipeline(device, device->pipeline_add_id_f32, "add_id_f32", add_id_f32_len, add_id_f32_data, "main", 4, sizeof(vk_op_add_id_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_acc_f32, "acc_f32", acc_f32_len, acc_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_concat_f32, "concat_f32", concat_f32_len, concat_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_f16, "concat_f16", concat_f16_len, concat_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_i32, "concat_i32", concat_i32_len, concat_i32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_upscale_nearest_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_NEAREST}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_upscale_bilinear_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_BILINEAR}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_pad_f32, "pad_f32", pad_f32_len, pad_f32_data, "main", 2, sizeof(vk_op_pad_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_roll_f32, "roll_f32", roll_f32_len, roll_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_repeat_f32, "repeat_f32", repeat_f32_len, repeat_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_repeat_back_f32, "repeat_back_f32", repeat_back_f32_len, repeat_back_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
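// CREATE_UNARY instantiates the f32 and f16 pipelines for an elementwise unary op.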
#define CREATE_UNARY(name) \
    ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
    ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    CREATE_UNARY(gelu)
    CREATE_UNARY(gelu_erf)
    CREATE_UNARY(gelu_quick)
    CREATE_UNARY(silu)
    CREATE_UNARY(relu)
    CREATE_UNARY(tanh)
    CREATE_UNARY(sigmoid)
    CREATE_UNARY(hardsigmoid)
    CREATE_UNARY(hardswish)
#undef CREATE_UNARY

#define CREATE_UNARY_RTE(name) \
    if (device->float_controls_rte_fp16) { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32_rte", name ## _f32_rte_len, name ## _f32_rte_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16_rte", name ## _f16_rte_len, name ## _f16_rte_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
    } else { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
    }
    CREATE_UNARY_RTE(exp)
#undef CREATE_UNARY_RTE
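// CREATE_GLU does the same for the gated-linear-unit ops, preferring the _rte
// shader variants when RTE fp16 float controls are available.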
#define CREATE_GLU(name) \
    if (device->float_controls_rte_fp16) { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32_rte", name ## _f32_rte_len, name ## _f32_rte_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16_rte", name ## _f16_rte_len, name ## _f16_rte_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
    } else { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
    }

    CREATE_GLU(geglu)
    CREATE_GLU(reglu)
    CREATE_GLU(swiglu)
    CREATE_GLU(swiglu_oai)
    CREATE_GLU(geglu_erf)
    CREATE_GLU(geglu_quick)
#undef CREATE_GLU

    ggml_vk_create_pipeline(device, device->pipeline_leaky_relu_f32, "leaky_relu_f32", leaky_relu_f32_len, leaky_relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_silu_back_f32, "silu_back_f32", silu_back_f32_len, silu_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {1, 512, 1}, {}, 1, true);

    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_wg512, "soft_max_f32_wg512", soft_max_f32_len, soft_max_f32_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16_wg512, "soft_max_f32_f16_wg512", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_back_f32, "soft_max_back_f32", soft_max_back_f32_len, soft_max_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1, true);

    ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_multi_f32, "rope_multi_f32", rope_multi_f32_len, rope_multi_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f32, "rope_vision_f32", rope_vision_f32_len, rope_vision_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);

    if (device->float_controls_rte_fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_rte_len, rope_norm_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_rte_len, rope_neox_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_multi_f16, "rope_multi_f16", rope_multi_f16_rte_len, rope_multi_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_rte_len, rope_vision_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32_f16, "rope_norm_f32_f16", rope_norm_f32_f16_rte_len, rope_norm_f32_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32_f16, "rope_neox_f32_f16", rope_neox_f32_f16_rte_len, rope_neox_f32_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_multi_f16, "rope_multi_f16", rope_multi_f16_len, rope_multi_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_len, rope_vision_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32_f16, "rope_norm_f32_f16", rope_norm_f32_f16_len, rope_norm_f32_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32_f16, "rope_neox_f32_f16", rope_neox_f32_f16_len, rope_neox_f32_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    }

    for (uint32_t i = 0; i < num_argsort_pipelines; ++i) {
        ggml_vk_create_pipeline2(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1u<<i, 1, 1}, {1u<<i, i}, 1, true);
    }

    ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);

    ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_sum_rows_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);

    ggml_vk_create_pipeline(device, device->pipeline_count_equal_i32, "count_equal_i32", count_equal_i32_len, count_equal_i32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, { device->subgroup_size }, 1);
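// IM2COL selects between the buffer-device-address (_bda) shader variants and the
// plain ones, depending on shaderInt64 + bufferDeviceAddress support (see below).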
#define IM2COL(bda) \
    ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32 ## bda ## _len, im2col_f32 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_im2col_3d_f32, "im2col_3d_f32", im2col_3d_f32 ## bda ## _len, im2col_3d_f32 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_3d_push_constants), {512, 1, 1}, { 512 }, 1, true); \
    if (device->float_controls_rte_fp16) { \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte ## bda ## _len, im2col_f32_f16_rte ## bda ## _data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_3d_f32_f16, "im2col_3d_f32_f16", im2col_3d_f32_f16_rte ## bda ## _len, im2col_3d_f32_f16_rte ## bda ## _data, "main", 2, sizeof(vk_op_im2col_3d_push_constants), {512, 1, 1}, { 512 }, 1, true); \
    } else { \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16 ## bda ## _len, im2col_f32_f16 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_3d_f32_f16, "im2col_3d_f32_f16", im2col_3d_f32_f16 ## bda ## _len, im2col_3d_f32_f16 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_3d_push_constants), {512, 1, 1}, { 512 }, 1, true); \
    }
    if (device->shader_int64 && device->buffer_device_address) {
        IM2COL(_bda)
    } else {
        IM2COL()
    }

    ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_conv_transpose_1d_f32, "conv_transpose_1d_f32", conv_transpose_1d_f32_len, conv_transpose_1d_f32_data, "main", 3, sizeof(vk_op_conv_transpose_1d_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_pool2d_f32, "pool2d_f32", pool2d_f32_len, pool2d_f32_data, "main", 2, sizeof(vk_op_pool2d_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv6_f32, "rwkv_wkv6_f32", rwkv_wkv6_f32_len, rwkv_wkv6_f32_data, "main", 7, sizeof(vk_op_rwkv_wkv6_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv7_f32, "rwkv_wkv7_f32", rwkv_wkv7_f32_len, rwkv_wkv7_f32_data, "main", 8, sizeof(vk_op_rwkv_wkv7_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);

    if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1, true, true);
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1, true, true);
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true);
    }

    ggml_vk_create_pipeline(device, device->pipeline_ssm_conv_f32, "ssm_conv_f32", ssm_conv_f32_len, ssm_conv_f32_data, "main", 3, sizeof(vk_op_ssm_conv_push_constants), {32, 1, 1}, {32}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_opt_step_sgd_f32, "opt_step_sgd_f32", opt_step_sgd_f32_len, opt_step_sgd_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    // conv2d, conv_transpose_2d
    for (uint32_t s = 0; s < CONV_SHAPE_COUNT; ++s) {
        uint32_t conv2d_WG_SIZE = 256;
        uint32_t conv2d_BS_K = 128;
        uint32_t conv2d_BS_CRS = 16;
        uint32_t use_collectives = 0; // Enables subgroup ops for preventing the re-calculation of indices.
        uint32_t conv2d_BS_NPQ = 128;
        uint32_t conv2d_TS_K = 8;
        uint32_t conv2d_SHMEM_PAD = 4;
        bool conv2d_UNROLL = true;

#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
        if (device->coopmat2) {
            conv2d_SHMEM_PAD = 8; // 8 float16_t
        }
#endif
        if (device->vendor_id == VK_VENDOR_ID_INTEL) {
            conv2d_SHMEM_PAD = 0;
            conv2d_UNROLL = false;
        } else if (device->vendor_id == VK_VENDOR_ID_AMD) {
            conv2d_SHMEM_PAD = device->architecture == vk_device_architecture::AMD_GCN ? 1 : 4;
        }

        switch (s) {
        default:
        case CONV_SHAPE_128x128:
            conv2d_BS_K = 128;
            conv2d_BS_NPQ = 128;
            conv2d_BS_CRS = 16;
            if (device->vendor_id == VK_VENDOR_ID_AMD && device->architecture != vk_device_architecture::AMD_GCN) {
                conv2d_UNROLL = false;
            }
            break;
        case CONV_SHAPE_64x32:
            conv2d_BS_K = 64;
            conv2d_BS_NPQ = 32;
            conv2d_BS_CRS = 32;
            conv2d_TS_K = 4;
            break;
        case CONV_SHAPE_32x256:
            conv2d_BS_K = 32;
            conv2d_BS_NPQ = 256;
            conv2d_BS_CRS = 16;
            break;
        }

        // Use collectives on pre-Turing NVIDIA GPUs and GCN AMD cards, which had slower integer math.
        bool allow_collectives_nv = device->vendor_id != VK_VENDOR_ID_NVIDIA ||
                                    device->architecture == vk_device_architecture::NVIDIA_PRE_TURING;
        bool allow_collectives_amd = device->vendor_id != VK_VENDOR_ID_AMD ||
                                     device->architecture == vk_device_architecture::AMD_GCN;
        if (device->subgroup_shuffle &&
            device->vendor_id != VK_VENDOR_ID_INTEL && // Do not enable collectives on Intel, see PR 14316.
            allow_collectives_nv &&
            allow_collectives_amd) {
            use_collectives = 1;
            conv2d_BS_CRS = std::min(
                device->subgroup_size,
                conv2d_BS_CRS); // CRS block size should be capped at subgroup size for correctness when shuffle is used.
        }
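        // Estimate the shared memory needed for the two input tiles (each padded by
        // SHMEM_PAD) and shrink the CRS block size if it exceeds the device limit.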
        uint32_t conv2d_shmem_req =
            (conv2d_BS_K * (conv2d_BS_CRS + conv2d_SHMEM_PAD) + conv2d_BS_CRS * (conv2d_BS_NPQ + conv2d_SHMEM_PAD)) * sizeof(float);
        if (device->properties.limits.maxComputeSharedMemorySize < conv2d_shmem_req) {
            conv2d_BS_CRS = 8;
            if (use_collectives) {
                conv2d_BS_CRS = std::min(device->subgroup_size, conv2d_BS_CRS);
            }
        }

        std::array<uint32_t, 3> wg_denoms = { conv2d_BS_K, conv2d_BS_NPQ, 1 };
        std::vector<uint32_t> spec_constants = { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives, conv2d_SHMEM_PAD };

#define CREATE_CONV(name, type_suffix, spv_suffix) \
        ggml_vk_create_pipeline( \
            device, device->pipeline_##name##type_suffix[s], #name #type_suffix, \
            name##type_suffix##spv_suffix##_len, name##type_suffix##spv_suffix##_data, "main", 3, \
            sizeof(vk_op_##name##_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
#define CREATE_CONVS(spv_suffix) \
        CREATE_CONV(conv2d, _f32, spv_suffix) \
        CREATE_CONV(conv2d, _f16_f32, spv_suffix) \
        if (device->properties.limits.maxPushConstantsSize >= sizeof(vk_op_conv_transpose_2d_push_constants)) { \
            CREATE_CONV(conv_transpose_2d, _f32, spv_suffix) \
            CREATE_CONV(conv_transpose_2d, _f16_f32, spv_suffix) \
        }
#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
        if (device->coopmat2) {
            CREATE_CONVS(_cm2)
        } else
#endif
        if (conv2d_UNROLL) {
            CREATE_CONVS(_unroll)
        } else {
            CREATE_CONVS( )
        }
#undef CREATE_CONV
#undef CREATE_CONVS
    }

    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f32, "conv2d_dw_whcn_f32", conv2d_dw_whcn_f32_len, conv2d_dw_whcn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f32, "conv2d_dw_cwhn_f32", conv2d_dw_cwhn_f32_len, conv2d_dw_cwhn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f16_f32, "conv2d_dw_whcn_f16_f32", conv2d_dw_whcn_f16_f32_len, conv2d_dw_whcn_f16_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f16_f32, "conv2d_dw_cwhn_f16_f32", conv2d_dw_cwhn_f16_f32_len, conv2d_dw_cwhn_f16_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
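    // Fused top-k MoE pipelines: one per power-of-two expert count, in three
    // variants (early softmax, early softmax + norm, late softmax) selected via
    // spec constants.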
    for (uint32_t i = 0; i < num_topk_moe_pipelines; ++i) {
        ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][TOPK_MOE_EARLY_SOFTMAX], "topk_moe_f32_early_softmax_"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<<i, 0, 0}, 1, true, true);
        ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][TOPK_MOE_EARLY_SOFTMAX_NORM], "topk_moe_f32_early_softmax_norm"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<<i, 1, 0}, 1, true, true);
        ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][TOPK_MOE_LATE_SOFTMAX], "topk_moe_f32_late_softmax"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<<i, 0, 1}, 1, true, true);
    }

    for (auto &c : compiles) {
        c.wait();
    }
    device->need_compiles = false;
}

static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch);
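// Lazily initializes (and caches in vk_instance.devices) the vk_device for the
// given index: enumerates extensions, queries properties and features, and
// creates the logical device.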
static vk_device ggml_vk_get_device(size_t idx) {
    VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");

    if (vk_instance.devices[idx] == nullptr) {
        VK_LOG_DEBUG("Initializing new vk_device");

        vk_device device = std::make_shared<vk_device_struct>();
        vk_instance.devices[idx] = device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
        device->memory_logger = std::unique_ptr<vk_memory_logger>(new vk_memory_logger());
#endif
        if (vk_perf_logger_enabled) {
            device->perf_logger = std::unique_ptr<vk_perf_logger>(new vk_perf_logger());
        }

        size_t dev_num = vk_instance.device_indices[idx];

        std::vector<vk::PhysicalDevice> physical_devices = vk_instance.instance.enumeratePhysicalDevices();
        if (dev_num >= physical_devices.size()) {
            std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
            throw std::runtime_error("Device not found");
        }

        device->physical_device = physical_devices[dev_num];
        const std::vector<vk::ExtensionProperties> ext_props = device->physical_device.enumerateDeviceExtensionProperties();

        device->architecture = get_device_architecture(device->physical_device);
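        // Debug/override knobs: each of these environment variables toggles a
        // behavior flag when set; the value itself is ignored.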
        const char* GGML_VK_PREFER_HOST_MEMORY = getenv("GGML_VK_PREFER_HOST_MEMORY");
        device->prefer_host_memory = GGML_VK_PREFER_HOST_MEMORY != nullptr;
        const char* GGML_VK_DISABLE_HOST_VISIBLE_VIDMEM = getenv("GGML_VK_DISABLE_HOST_VISIBLE_VIDMEM");
        device->disable_host_visible_vidmem = GGML_VK_DISABLE_HOST_VISIBLE_VIDMEM != nullptr;
        const char* GGML_VK_ALLOW_SYSMEM_FALLBACK = getenv("GGML_VK_ALLOW_SYSMEM_FALLBACK");
        device->allow_sysmem_fallback = GGML_VK_ALLOW_SYSMEM_FALLBACK != nullptr;
        const char* GGML_VK_DISABLE_GRAPH_OPTIMIZE = getenv("GGML_VK_DISABLE_GRAPH_OPTIMIZE");
        device->disable_graph_optimize = GGML_VK_DISABLE_GRAPH_OPTIMIZE != nullptr;

        bool fp16_storage = false;
        bool fp16_compute = false;
        bool maintenance4_support = false;
        bool sm_builtins = false;
        bool amd_shader_core_properties2 = false;
        bool pipeline_robustness = false;
        bool coopmat2_support = false;
        bool pipeline_executable_properties_support = false;
        device->coopmat_support = false;
        device->integer_dot_product = false;
        bool bfloat16_support = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
                maintenance4_support = true;
            } else if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
                fp16_storage = true;
            } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
                fp16_compute = true;
            } else if (strcmp("VK_NV_shader_sm_builtins", properties.extensionName) == 0) {
                sm_builtins = true;
            } else if (strcmp("VK_AMD_shader_core_properties2", properties.extensionName) == 0) {
                amd_shader_core_properties2 = true;
            } else if (strcmp("VK_EXT_pipeline_robustness", properties.extensionName) == 0) {
                pipeline_robustness = true;
            } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
                device->subgroup_size_control = true;
#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
            } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_COOPMAT")) {
                device->coopmat_support = true;
                device->coopmat_m = 0;
                device->coopmat_n = 0;
                device->coopmat_k = 0;
#endif
#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
            } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_COOPMAT2")) {
                coopmat2_support = true;
#endif
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_INTEGER_DOT_PRODUCT")) {
                device->integer_dot_product = true;
#endif
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
            } else if (strcmp("VK_KHR_shader_bfloat16", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_BFLOAT16")) {
                bfloat16_support = true;
#endif
            } else if (strcmp("VK_KHR_pipeline_executable_properties", properties.extensionName) == 0) {
                pipeline_executable_properties_support = true;
            }
        }
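        // Chain the property structs via pNext and query them with a single
        // getProperties2 call; optional structs are appended only when the
        // corresponding extension was found above.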
        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceMaintenance3Properties props3;
        vk::PhysicalDeviceMaintenance4Properties props4;
        vk::PhysicalDeviceSubgroupProperties subgroup_props;
        vk::PhysicalDeviceDriverProperties driver_props;
        vk::PhysicalDeviceShaderSMBuiltinsPropertiesNV sm_props;
        vk::PhysicalDeviceShaderCoreProperties2AMD amd_shader_core_properties2_props;
        vk::PhysicalDeviceVulkan11Properties vk11_props;
        vk::PhysicalDeviceVulkan12Properties vk12_props;
        vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;
        vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR shader_integer_dot_product_props;
        props2.pNext = &props3;
        props3.pNext = &subgroup_props;
        subgroup_props.pNext = &driver_props;
        driver_props.pNext = &vk11_props;
        vk11_props.pNext = &vk12_props;

        VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_props;

        if (maintenance4_support) {
            last_struct->pNext = (VkBaseOutStructure *)&props4;
            last_struct = (VkBaseOutStructure *)&props4;
        }
        if (sm_builtins) {
            last_struct->pNext = (VkBaseOutStructure *)&sm_props;
            last_struct = (VkBaseOutStructure *)&sm_props;
        }
        if (amd_shader_core_properties2) {
            last_struct->pNext = (VkBaseOutStructure *)&amd_shader_core_properties2_props;
            last_struct = (VkBaseOutStructure *)&amd_shader_core_properties2_props;
        }
        if (device->subgroup_size_control) {
            last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_props;
            last_struct = (VkBaseOutStructure *)&subgroup_size_control_props;
        }

#if defined(VK_NV_cooperative_matrix2)
        vk::PhysicalDeviceCooperativeMatrix2PropertiesNV coopmat2_props;
        if (coopmat2_support) {
            last_struct->pNext = (VkBaseOutStructure *)&coopmat2_props;
            last_struct = (VkBaseOutStructure *)&coopmat2_props;
        }
#endif

        if (device->integer_dot_product) {
            last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_props;
            last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_props;
        }

        device->physical_device.getProperties2(&props2);
        device->properties = props2.properties;
        device->vendor_id = device->properties.vendorID;
        device->driver_id = driver_props.driverID;

        const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");
        if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) {
            device->max_memory_allocation_size = std::stoull(GGML_VK_FORCE_MAX_ALLOCATION_SIZE);
        } else if (maintenance4_support) {
            device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
        } else {
            device->max_memory_allocation_size = props3.maxMemoryAllocationSize;
        }

        const char* GGML_VK_FORCE_MAX_BUFFER_SIZE = getenv("GGML_VK_FORCE_MAX_BUFFER_SIZE");
        if (GGML_VK_FORCE_MAX_BUFFER_SIZE != nullptr) {
            device->max_buffer_size = std::stoull(GGML_VK_FORCE_MAX_BUFFER_SIZE);
        } else if (maintenance4_support) {
            device->max_buffer_size = props4.maxBufferSize;
        } else {
            device->max_buffer_size = device->max_memory_allocation_size;
        }

        const char* GGML_VK_SUBALLOCATION_BLOCK_SIZE = getenv("GGML_VK_SUBALLOCATION_BLOCK_SIZE");
        if (GGML_VK_SUBALLOCATION_BLOCK_SIZE != nullptr) {
            device->suballocation_block_size = std::stoull(GGML_VK_SUBALLOCATION_BLOCK_SIZE);
        } else {
            // Limit batching of allocations to 1GB by default to avoid fragmentation issues
            device->suballocation_block_size = 1024*1024*1024;
        }
        device->suballocation_block_size = std::min(device->suballocation_block_size, device->max_memory_allocation_size);

        device->subgroup_size = subgroup_props.subgroupSize;
        device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
        if (sm_builtins) {
            device->shader_core_count = sm_props.shaderSMCount;
        } else if (amd_shader_core_properties2) {
            device->shader_core_count = amd_shader_core_properties2_props.activeComputeUnitCount;
        } else {
            device->shader_core_count = 0;
        }
        device->float_controls_rte_fp16 = vk12_props.shaderRoundingModeRTEFloat16;

        device->subgroup_arithmetic = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                      (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eArithmetic);
#ifdef __APPLE__
        // Workaround for subgroup arithmetic failing on MoltenVK with AMD GPUs (issue 15846)
        if (device->vendor_id == VK_VENDOR_ID_AMD) {
            device->subgroup_arithmetic = false;
        }
#endif
        device->subgroup_shuffle = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                   (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eShuffle);
        device->subgroup_clustered = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                     (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eClustered);
        device->subgroup_ballot = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                  (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eBallot);

        const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr;
        device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

        if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props, device->architecture)) {
            device->coopmat_support = false;
        }

        device->integer_dot_product = device->integer_dot_product && shader_integer_dot_product_props.integerDotProduct4x8BitPackedSignedAccelerated;

        std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();

        // Try to find a non-graphics compute queue and transfer-focused queues
        const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
        const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);

        const float priorities[] = { 1.0f, 1.0f };
        device->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;

        std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
        if (compute_queue_family_index != transfer_queue_family_index) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
        } else if (!device->single_queue) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
        } else {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
        }
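        // Build the feature chain the same way: start from the core 1.1/1.2 feature
        // structs and append extension feature structs (enabling the matching device
        // extensions) before a single vkGetPhysicalDeviceFeatures2 query.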
  3577. vk::DeviceCreateInfo device_create_info;
  3578. std::vector<const char *> device_extensions;
  3579. vk::PhysicalDeviceFeatures device_features = device->physical_device.getFeatures();
  3580. VkPhysicalDeviceFeatures2 device_features2;
  3581. device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
  3582. device_features2.pNext = nullptr;
  3583. device_features2.features = (VkPhysicalDeviceFeatures)device_features;
  3584. VkPhysicalDeviceVulkan11Features vk11_features;
  3585. vk11_features.pNext = nullptr;
  3586. vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
  3587. device_features2.pNext = &vk11_features;
  3588. VkPhysicalDeviceVulkan12Features vk12_features;
  3589. vk12_features.pNext = nullptr;
  3590. vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
  3591. vk11_features.pNext = &vk12_features;
  3592. last_struct = (VkBaseOutStructure *)&vk12_features;
  3593. VkPhysicalDevicePipelineRobustnessFeaturesEXT pl_robustness_features;
  3594. pl_robustness_features.pNext = nullptr;
  3595. pl_robustness_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT;
  3596. pl_robustness_features.pipelineRobustness = VK_FALSE;
  3597. if (pipeline_robustness) {
  3598. last_struct->pNext = (VkBaseOutStructure *)&pl_robustness_features;
  3599. last_struct = (VkBaseOutStructure *)&pl_robustness_features;
  3600. device_extensions.push_back("VK_EXT_pipeline_robustness");
  3601. }
  3602. VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_size_control_features;
  3603. subgroup_size_control_features.pNext = nullptr;
  3604. subgroup_size_control_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
  3605. subgroup_size_control_features.computeFullSubgroups = false;
  3606. subgroup_size_control_features.subgroupSizeControl = false;
  3607. if (device->subgroup_size_control) {
  3608. last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_features;
  3609. last_struct = (VkBaseOutStructure *)&subgroup_size_control_features;
  3610. }
  3611. #if defined(VK_KHR_cooperative_matrix)
  3612. VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
  3613. coopmat_features.pNext = nullptr;
  3614. coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
  3615. coopmat_features.cooperativeMatrix = VK_FALSE;
  3616. if (device->coopmat_support) {
  3617. last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
  3618. last_struct = (VkBaseOutStructure *)&coopmat_features;
  3619. }
  3620. #endif
  3621. #if defined(VK_NV_cooperative_matrix2)
  3622. VkPhysicalDeviceCooperativeMatrix2FeaturesNV coopmat2_features {};
  3623. coopmat2_features.pNext = nullptr;
  3624. coopmat2_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV;
  3625. if (coopmat2_support) {
  3626. last_struct->pNext = (VkBaseOutStructure *)&coopmat2_features;
  3627. last_struct = (VkBaseOutStructure *)&coopmat2_features;
  3628. device_extensions.push_back("VK_NV_cooperative_matrix2");
  3629. }
  3630. #endif
  3631. #if defined(VK_KHR_shader_bfloat16)
  3632. VkPhysicalDeviceShaderBfloat16FeaturesKHR bfloat16_features {};
  3633. bfloat16_features.pNext = nullptr;
  3634. bfloat16_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR;
  3635. if (bfloat16_support) {
  3636. last_struct->pNext = (VkBaseOutStructure *)&bfloat16_features;
  3637. last_struct = (VkBaseOutStructure *)&bfloat16_features;
  3638. device_extensions.push_back("VK_KHR_shader_bfloat16");
  3639. }
  3640. #endif
  3641. VkPhysicalDeviceMaintenance4Features maint4_features {};
  3642. maint4_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES;
  3643. if (maintenance4_support) {
  3644. last_struct->pNext = (VkBaseOutStructure *)&maint4_features;
  3645. last_struct = (VkBaseOutStructure *)&maint4_features;
  3646. device_extensions.push_back("VK_KHR_maintenance4");
  3647. }
  3648. VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR shader_integer_dot_product_features {};
  3649. shader_integer_dot_product_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR;
  3650. if (device->integer_dot_product) {
  3651. last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_features;
  3652. last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_features;
  3653. device_extensions.push_back("VK_KHR_shader_integer_dot_product");
  3654. }
  3655. VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR pep_features {};
  3656. pep_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR;
  3657. if (pipeline_executable_properties_support) {
  3658. last_struct->pNext = (VkBaseOutStructure *)&pep_features;
  3659. last_struct = (VkBaseOutStructure *)&pep_features;
  3660. device_extensions.push_back("VK_KHR_pipeline_executable_properties");
  3661. }
        vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);

        device->pipeline_executable_properties_support = pipeline_executable_properties_support;

        device->fp16 = device->fp16 && vk12_features.shaderFloat16;
#if defined(VK_KHR_shader_bfloat16)
        device->bf16 = bfloat16_support && bfloat16_features.shaderBFloat16Type;
#else
        device->bf16 = false;
#endif
        device->pipeline_robustness = pl_robustness_features.pipelineRobustness;

        device->multi_add = vk12_props.shaderRoundingModeRTEFloat16 &&
                            device->properties.limits.maxPushConstantsSize >= sizeof(vk_op_multi_add_push_constants) &&
                            getenv("GGML_VK_DISABLE_MULTI_ADD") == nullptr;

        device->shader_int64 = device_features2.features.shaderInt64;
        device->buffer_device_address = vk12_features.bufferDeviceAddress;

        if (device->subgroup_size_control) {
            device->subgroup_min_size = subgroup_size_control_props.minSubgroupSize;
            device->subgroup_max_size = subgroup_size_control_props.maxSubgroupSize;
            device_extensions.push_back("VK_EXT_subgroup_size_control");
        }

        device->subgroup_size_control = device->subgroup_size_control &&
                (subgroup_size_control_props.requiredSubgroupSizeStages & vk::ShaderStageFlagBits::eCompute) &&
                subgroup_size_control_features.subgroupSizeControl;

        device->subgroup_require_full_support = subgroup_size_control_features.computeFullSubgroups;
#if defined(VK_KHR_cooperative_matrix)
        device->coopmat_support = device->coopmat_support && coopmat_features.cooperativeMatrix;

        // coopmat1 fa shader currently assumes 32 invocations per subgroup
        device->coopmat1_fa_support = device->coopmat_support && device->subgroup_require_full_support &&
                                      device->subgroup_size_control && device->subgroup_min_size <= 32 &&
                                      device->subgroup_max_size >= 32;
#endif
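
        // The NV_cooperative_matrix2 path is stricter: besides the feature bits
        // checked below, it needs workgroup-scope fp16 matrices with small enough
        // M/N/K granularities at workgroup sizes 128 and 256 (verified by the
        // flexible-dimensions query loop that follows).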
        if (coopmat2_support) {
#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
            if (coopmat2_features.cooperativeMatrixWorkgroupScope &&
                coopmat2_features.cooperativeMatrixFlexibleDimensions &&
                coopmat2_features.cooperativeMatrixReductions &&
                coopmat2_features.cooperativeMatrixConversions &&
                coopmat2_features.cooperativeMatrixPerElementOperations &&
                coopmat2_features.cooperativeMatrixTensorAddressing &&
                coopmat2_features.cooperativeMatrixBlockLoads &&
                vk12_features.bufferDeviceAddress) {

                std::vector<VkCooperativeMatrixFlexibleDimensionsPropertiesNV> flexible_dimensions;
                uint32_t count = 0;

                PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV
                    _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV =
                        (PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV)
                        vk_instance.instance.getProcAddr("vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV");

                _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, nullptr);

                VkCooperativeMatrixFlexibleDimensionsPropertiesNV empty_prop {};
                empty_prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_FLEXIBLE_DIMENSIONS_PROPERTIES_NV;
                flexible_dimensions.resize(count, empty_prop);

                _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, flexible_dimensions.data());

                bool found_fp16_128 = false,
                     found_fp16_256 = false,
                     found_fp32_128 = false,
                     found_fp32_256 = false;
                // need to support fp16*fp16 with fp16/fp32 accumulator, for workgroupsize 128
                // with 32x16x16 and 256 with 32x32x16.
                for (auto &prop : flexible_dimensions) {
                    if (prop.saturatingAccumulation == VK_FALSE &&
                        prop.scope == VK_SCOPE_WORKGROUP_KHR &&
                        prop.AType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
                        prop.BType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
                        if (prop.workgroupInvocations == 128 &&
                            prop.MGranularity <= 32 &&
                            prop.NGranularity <= 16 &&
                            prop.KGranularity <= 16) {
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
                                found_fp16_128 = true;
                            }
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
                                found_fp32_128 = true;
                            }
                        }
                        if (prop.workgroupInvocations == 256 &&
                            prop.MGranularity <= 32 &&
                            prop.NGranularity <= 32 &&
                            prop.KGranularity <= 16) {
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
                                found_fp16_256 = true;
                            }
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
                                found_fp32_256 = true;
                            }
                        }
                    }
                }
                if (found_fp16_128 && found_fp16_256 &&
                    found_fp32_128 && found_fp32_256 &&
                    coopmat2_props.cooperativeMatrixFlexibleDimensionsMaxDimension >= 512) {
                    device->coopmat2 = true;
                }
            }
#endif
        }
        if (!vk11_features.storageBuffer16BitAccess) {
            std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
            throw std::runtime_error("Unsupported device");
        }

        device_extensions.push_back("VK_KHR_16bit_storage");

#ifdef GGML_VULKAN_VALIDATE
        device_extensions.push_back("VK_KHR_shader_non_semantic_info");
#endif

        if (device->fp16) {
            device_extensions.push_back("VK_KHR_shader_float16_int8");
        }
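
        // Enumerate every cooperative-matrix shape the driver advertises and pick
        // one consistent MxNxK shape; a second accumulator type is only enabled
        // below when it is offered with exactly the same shape.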
#if defined(VK_KHR_cooperative_matrix)
        if (device->coopmat_support) {
            // Query supported shapes
            std::vector<VkCooperativeMatrixPropertiesKHR> cm_props;

            PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR =
                (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)vkGetInstanceProcAddr(vk_instance.instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR");

            uint32_t cm_props_num;

            pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, nullptr);

            cm_props.resize(cm_props_num);

            for (auto& prop : cm_props) {
                prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR;
            }

            pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, cm_props.data());

            VK_LOG_DEBUG("ggml_vulkan: Cooperative Matrix Shapes: " << cm_props.size());

            for (auto& prop : cm_props) {
                VK_LOG_DEBUG("ggml_vulkan: M: " << prop.MSize << " N: " << prop.NSize << " K: " << prop.KSize << " A: " << vk::to_string((vk::ComponentTypeKHR)prop.AType) << " B: " << vk::to_string((vk::ComponentTypeKHR)prop.BType) << " C: " << vk::to_string((vk::ComponentTypeKHR)prop.CType) << " Result: " << vk::to_string((vk::ComponentTypeKHR)prop.ResultType) << " saturatingAccumulation: " << prop.saturatingAccumulation << " scope: " << vk::to_string((vk::ScopeKHR)prop.scope));

                if ((vk::ComponentTypeKHR)prop.AType == vk::ComponentTypeKHR::eFloat16 &&
                    (vk::ComponentTypeKHR)prop.BType == vk::ComponentTypeKHR::eFloat16 &&
                    (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup
                ) {
                    if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat32 &&
                        (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat32) {
                        // coopmat sizes not set yet
                        if (device->coopmat_m == 0) {
                            device->coopmat_acc_f32_support = true;
                            device->coopmat_m = prop.MSize;
                            device->coopmat_n = prop.NSize;
                            device->coopmat_k = prop.KSize;
                        } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
                            // Only enable if shape is identical
                            device->coopmat_acc_f32_support = true;
                        }
                        if (prop.MSize == 16 && prop.NSize == 16 && prop.KSize == 16) {
                            device->coopmat_support_16x16x16_f32acc = true;
                        }
                    } else if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat16 &&
                               (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat16) {
                        // coopmat sizes not set yet
                        if (device->coopmat_m == 0) {
                            device->coopmat_acc_f16_support = true;
                            device->coopmat_m = prop.MSize;
                            device->coopmat_n = prop.NSize;
                            device->coopmat_k = prop.KSize;
                        } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
                            // Only enable if shape is identical
                            device->coopmat_acc_f16_support = true;
                        }
                        if (prop.MSize == 16 && prop.NSize == 16 && prop.KSize == 16) {
                            device->coopmat_support_16x16x16_f16acc = true;
                        }
                    }
                } else if ((vk::ComponentTypeKHR)prop.AType == vk::ComponentTypeKHR::eSint8 &&
                           (vk::ComponentTypeKHR)prop.BType == vk::ComponentTypeKHR::eSint8 &&
                           (vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eSint32 &&
                           (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eSint32 &&
                           (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup &&
                           device->coopmat_int_m == 0
                ) {
                    device->coopmat_int_support = true;
                    device->coopmat_int_m = prop.MSize;
                    device->coopmat_int_n = prop.NSize;
                    device->coopmat_int_k = prop.KSize;
                }
#if defined(VK_KHR_shader_bfloat16) && defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
                if (prop.AType == VK_COMPONENT_TYPE_BFLOAT16_KHR &&
                    prop.BType == VK_COMPONENT_TYPE_BFLOAT16_KHR &&
                    prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                    prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                    (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup
                ) {
                    // coopmat sizes not set yet
                    if (device->coopmat_m == 0) {
                        device->coopmat_bf16_support = true;
                        device->coopmat_m = prop.MSize;
                        device->coopmat_n = prop.NSize;
                        device->coopmat_k = prop.KSize;
                    } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
                        // Only enable if shape is identical
                        device->coopmat_bf16_support = true;
                    }
                }
#endif
            }

            if (device->coopmat_m == 0 || !device->coopmat_acc_f32_support) {
                // No suitable matmul mode found
                GGML_LOG_DEBUG("ggml_vulkan: WARNING: No suitable matrix core mode found. Disabling matrix cores.\n");
                device->coopmat_support = false;
            }
            if (getenv("GGML_VK_DISABLE_BFLOAT16")) {
                device->coopmat_bf16_support = false;
            }
        }

        if (device->coopmat_support) {
            device_extensions.push_back("VK_KHR_cooperative_matrix");
        }
#if defined(VK_KHR_shader_bfloat16)
        if (device->coopmat_bf16_support) {
            device_extensions.push_back("VK_KHR_shader_bfloat16");
        }
#endif
#endif
        device->name = GGML_VK_NAME + std::to_string(idx);

        device_create_info = {
            vk::DeviceCreateFlags(),
            device_queue_create_infos,
            {},
            device_extensions
        };
        device_create_info.setPNext(&device_features2);
        device->device = device->physical_device.createDevice(device_create_info);

        // Queues
        ggml_vk_create_queue(device, device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer }, false);

        // Shaders
        // Disable matmul tile sizes early if performance low or not supported
        for (uint32_t i = 0; i < GGML_TYPE_COUNT; ++i) {
            switch (device->vendor_id) {
#ifndef GGML_VULKAN_RUN_TESTS
                case VK_VENDOR_ID_AMD:
                case VK_VENDOR_ID_INTEL:
                    device->mul_mat_l[i] = false;
                    device->mul_mat_m[i] = true;
                    device->mul_mat_s[i] = true;
                    device->mul_mat_id_l[i] = false;
                    device->mul_mat_id_m[i] = true;
                    device->mul_mat_id_s[i] = true;
                    break;
                case VK_VENDOR_ID_APPLE:
                    device->mul_mat_l[i] = false;
                    device->mul_mat_m[i] = true;
                    device->mul_mat_s[i] = false;
                    device->mul_mat_id_l[i] = false;
                    device->mul_mat_id_m[i] = true;
                    device->mul_mat_id_s[i] = false;
                    break;
#endif
                default:
                    device->mul_mat_l[i] = true;
                    device->mul_mat_m[i] = true;
                    device->mul_mat_s[i] = true;
                    device->mul_mat_id_l[i] = true;
                    device->mul_mat_id_m[i] = true;
                    device->mul_mat_id_s[i] = true;
                    break;
            }
        }
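
        // A single descriptor set layout (device->dsl) serves the backend's compute
        // pipelines: MAX_PARAMETER_COUNT storage-buffer bindings, compute stage only.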
        std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
        std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
        for (uint32_t i = 0; i < MAX_PARAMETER_COUNT; i++) {
            dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
            dsl_binding_flags.push_back({});
        }

        vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };

        vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
            {},
            dsl_binding);
        descriptor_set_layout_create_info.setPNext(&dslbfci);
        device->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info);

        ggml_vk_load_shaders(device);

        if (!device->single_queue) {
            const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
            ggml_vk_create_queue(device, device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer }, true);
        } else {
            // TODO: Use pointer or reference to avoid copy
            device->transfer_queue.copyFrom(device->compute_queue);
            device->transfer_queue.cmd_pool.init(device, &device->transfer_queue);
        }

        device->buffer_type = {
            /* .iface   = */ ggml_backend_vk_buffer_type_interface,
            /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), idx),
            /* .context = */ new ggml_backend_vk_buffer_type_context{ device->name, device },
        };

        device->fence = device->device.createFence({});

        device->idx = idx;

        device->disable_fusion = getenv("GGML_VK_DISABLE_FUSION") != nullptr;

        device->add_rms_fusion = !device->disable_fusion &&
                                 device->subgroup_arithmetic &&
                                 device->vendor_id != VK_VENDOR_ID_INTEL;
        device->partials_binding_alignment =
            std::max(4u, (uint32_t)device->properties.limits.minStorageBufferOffsetAlignment);
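
        // mmvq_mode: 0 keeps the default behavior, -1 force-disables the quantized
        // mat-vec (MMVQ) path, 1 force-enables it (driven by the env vars below).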
        device->mmvq_mode = 0;
        if (getenv("GGML_VK_DISABLE_MMVQ")) {
            device->mmvq_mode = -1;
        } else if (getenv("GGML_VK_FORCE_MMVQ")) {
            device->mmvq_mode = 1;
        }

        return device;
    }

    return vk_instance.devices[idx];
}

static void ggml_vk_print_gpu_info(size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
    VK_LOG_DEBUG("ggml_vk_print_gpu_info(" << dev_num << ")");
    GGML_ASSERT(vk_instance_initialized);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    if (dev_num >= devices.size()) {
        std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
        throw std::runtime_error("Device not found");
    }

    vk::PhysicalDevice physical_device = devices[dev_num];
    std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();

    bool fp16_storage = false;
    bool fp16_compute = false;
    bool coopmat_support = false;
    bool coopmat2_support = false;
    bool integer_dot_product = false;
    bool bfloat16_support = false;

    for (auto properties : ext_props) {
        if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
            fp16_storage = true;
        } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
            fp16_compute = true;
#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
        } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_COOPMAT")) {
            coopmat_support = true;
#endif
#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
        } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_COOPMAT2")) {
            coopmat2_support = true;
#endif
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
        } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_INTEGER_DOT_PRODUCT")) {
            integer_dot_product = true;
#endif
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        } else if (strcmp("VK_KHR_shader_bfloat16", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_BFLOAT16")) {
            bfloat16_support = true;
#endif
        }
    }

    const vk_device_architecture device_architecture = get_device_architecture(physical_device);

    const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
    bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

    bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

    vk::PhysicalDeviceProperties2 props2;
    vk::PhysicalDeviceMaintenance3Properties props3;
    vk::PhysicalDeviceSubgroupProperties subgroup_props;
    vk::PhysicalDeviceDriverProperties driver_props;
    vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR shader_integer_dot_product_props;
    props2.pNext = &props3;
    props3.pNext = &subgroup_props;
    subgroup_props.pNext = &driver_props;

    // Pointer to the last chain element
    VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&driver_props;

    if (integer_dot_product) {
        last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_props;
        last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_props;
    }

    physical_device.getProperties2(&props2);

    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    device_features2.pNext = nullptr;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    VkPhysicalDeviceVulkan12Features vk12_features;
    vk12_features.pNext = nullptr;
    vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
    vk11_features.pNext = &vk12_features;

    // Pointer to the last chain element
    last_struct = (VkBaseOutStructure *)&vk12_features;

#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
    coopmat_features.pNext = nullptr;
    coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
    coopmat_features.cooperativeMatrix = VK_FALSE;

    if (coopmat_support) {
        last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
        last_struct = (VkBaseOutStructure *)&coopmat_features;
    }
#endif

    VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR shader_integer_dot_product_features {};
    shader_integer_dot_product_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR;
    if (integer_dot_product) {
        last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_features;
        last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_features;
    }

#if defined(VK_KHR_shader_bfloat16)
    VkPhysicalDeviceShaderBfloat16FeaturesKHR bfloat16_features {};
    bfloat16_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR;
    if (bfloat16_support) {
        last_struct->pNext = (VkBaseOutStructure *)&bfloat16_features;
        last_struct = (VkBaseOutStructure *)&bfloat16_features;
    }
#endif

    vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);

    fp16 = fp16 && vk12_features.shaderFloat16;

#if defined(VK_KHR_shader_bfloat16)
    bool bf16 = bfloat16_support && bfloat16_features.shaderBFloat16Type;
#else
    bool bf16 = false;
#endif

    uint32_t default_subgroup_size = get_subgroup_size("", device_architecture);
    const size_t subgroup_size = (default_subgroup_size != 0) ? default_subgroup_size : subgroup_props.subgroupSize;
    const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

    integer_dot_product = integer_dot_product
                       && shader_integer_dot_product_props.integerDotProduct4x8BitPackedSignedAccelerated
                       && shader_integer_dot_product_features.shaderIntegerDotProduct;

    coopmat_support = coopmat_support
#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
                   && coopmat_features.cooperativeMatrix
#endif
                   && ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props, device_architecture);

    std::string matrix_cores = coopmat2_support ? "NV_coopmat2" : coopmat_support ? "KHR_coopmat" : "none";

    std::string device_name = props2.properties.deviceName.data();
    GGML_LOG_DEBUG("ggml_vulkan: %zu = %s (%s) | uma: %d | fp16: %d | bf16: %d | warp size: %zu | shared memory: %d | int dot: %d | matrix cores: %s\n",
                   idx, device_name.c_str(), driver_props.driverName.data(), uma, fp16, bf16, subgroup_size,
                   props2.properties.limits.maxComputeSharedMemorySize, integer_dot_product, matrix_cores.c_str());

    if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
        GGML_LOG_DEBUG("ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want.\n");
    }
}

static bool ggml_vk_instance_validation_ext_available();
static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
static bool ggml_vk_instance_debug_utils_ext_available(const std::vector<vk::ExtensionProperties> & instance_extensions);
static bool ggml_vk_device_is_supported(const vk::PhysicalDevice & vkdev);

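// Shared Vulkan-Hpp dynamic dispatcher, returned by ggml_vk_default_dispatcher();
// ggml_vk_instance_init() seeds it from vkGetInstanceProcAddr before any calls
// and re-initializes it with instance-level entry points once the instance exists.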
static DispatchLoaderDynamic ggml_vk_default_dispatcher_instance;

DispatchLoaderDynamic & ggml_vk_default_dispatcher() {
    return ggml_vk_default_dispatcher_instance;
}

static void ggml_vk_instance_init() {
    if (vk_instance_initialized) {
        return;
    }
    VK_LOG_DEBUG("ggml_vk_instance_init()");

    // See https://github.com/KhronosGroup/Vulkan-Hpp?tab=readme-ov-file#extensions--per-device-function-pointers-
    ggml_vk_default_dispatcher_instance.init(vkGetInstanceProcAddr);

    uint32_t api_version = vk::enumerateInstanceVersion();

    if (api_version < VK_API_VERSION_1_2) {
        std::cerr << "ggml_vulkan: Error: Vulkan 1.2 required." << std::endl;
        throw vk::SystemError(vk::Result::eErrorFeatureNotPresent, "Vulkan 1.2 required");
    }

    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, api_version };

    const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
    const bool validation_ext = ggml_vk_instance_validation_ext_available();
#ifdef __APPLE__
    const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
#endif
    const bool debug_utils_ext = ggml_vk_instance_debug_utils_ext_available(instance_extensions) && getenv("GGML_VK_DEBUG_MARKERS") != nullptr;
    std::vector<const char*> layers;

    if (validation_ext) {
        layers.push_back("VK_LAYER_KHRONOS_validation");
    }
    std::vector<const char*> extensions;
    if (validation_ext) {
        extensions.push_back("VK_EXT_validation_features");
    }
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        extensions.push_back("VK_KHR_portability_enumeration");
    }
#endif
    if (debug_utils_ext) {
        extensions.push_back("VK_EXT_debug_utils");
    }
    vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
    }
#endif

    std::vector<vk::ValidationFeatureEnableEXT> features_enable;
    vk::ValidationFeaturesEXT validation_features;

    if (validation_ext) {
        features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
        validation_features = {
            features_enable,
            {},
        };
        validation_features.setPNext(nullptr);
        instance_create_info.setPNext(&validation_features);
        GGML_LOG_DEBUG("ggml_vulkan: Validation layers enabled\n");
    }
    vk_instance.instance = vk::createInstance(instance_create_info);
    vk_instance_initialized = true;

    if (debug_utils_ext) {
        vk_instance.debug_utils_support = true;
        vk_instance.pfn_vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkSetDebugUtilsObjectNameEXT");
        vk_instance.pfn_vkQueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkQueueBeginDebugUtilsLabelEXT");
        vk_instance.pfn_vkQueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkQueueEndDebugUtilsLabelEXT");
        vk_instance.pfn_vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdBeginDebugUtilsLabelEXT");
        vk_instance.pfn_vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdEndDebugUtilsLabelEXT");
        vk_instance.pfn_vkCmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdInsertDebugUtilsLabelEXT");
    }

    vk_perf_logger_enabled = getenv("GGML_VK_PERF_LOGGER") != nullptr;

    // See https://github.com/KhronosGroup/Vulkan-Hpp?tab=readme-ov-file#extensions--per-device-function-pointers-
    VULKAN_HPP_DEFAULT_DISPATCHER.init(vk_instance.instance);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
    char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
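    // e.g. GGML_VK_VISIBLE_DEVICES=0,2 restricts ggml to physical devices 0 and 2;
    // the commas are normalized to spaces and the indices parsed below.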
    if (devices_env != nullptr) {
        size_t num_available_devices = devices.size();

        std::string devices(devices_env);
        std::replace(devices.begin(), devices.end(), ',', ' ');

        std::stringstream ss(devices);
        size_t tmp;
        while (ss >> tmp) {
            if (tmp >= num_available_devices) {
                std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
                throw std::runtime_error("Invalid Vulkan device index");
            }
            vk_instance.device_indices.push_back(tmp);
        }
    } else {
        // If no vulkan devices are found, return early
        if (devices.empty()) {
            GGML_LOG_INFO("ggml_vulkan: No devices found.\n");
            return;
        }

        // Default to using all dedicated GPUs
        for (size_t i = 0; i < devices.size(); i++) {
            vk::PhysicalDeviceProperties2 new_props;
            vk::PhysicalDeviceDriverProperties new_driver;
            vk::PhysicalDeviceIDProperties new_id;
            new_props.pNext = &new_driver;
            new_driver.pNext = &new_id;
            devices[i].getProperties2(&new_props);

            if ((new_props.properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu || new_props.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu) && ggml_vk_device_is_supported(devices[i])) {
                // Check if there are two physical devices corresponding to the same GPU
                auto old_device = std::find_if(
                    vk_instance.device_indices.begin(),
                    vk_instance.device_indices.end(),
                    [&devices, &new_id](const size_t k){
                        vk::PhysicalDeviceProperties2 old_props;
                        vk::PhysicalDeviceIDProperties old_id;
                        old_props.pNext = &old_id;
                        devices[k].getProperties2(&old_props);

                        bool equals = std::equal(std::begin(old_id.deviceUUID), std::end(old_id.deviceUUID), std::begin(new_id.deviceUUID));
                        equals = equals || (
                            old_id.deviceLUIDValid && new_id.deviceLUIDValid &&
                            std::equal(std::begin(old_id.deviceLUID), std::end(old_id.deviceLUID), std::begin(new_id.deviceLUID))
                        );

                        return equals;
                    }
                );
                if (old_device == vk_instance.device_indices.end()) {
                    vk_instance.device_indices.push_back(i);
                } else {
                    // There can be two physical devices corresponding to the same GPU if there are 2 different drivers
                    // This can cause errors when splitting layers across the devices, need to keep only 1
                    VK_LOG_DEBUG("Device " << i << " and device " << *old_device << " have the same deviceUUID");

                    vk::PhysicalDeviceProperties2 old_props;
                    vk::PhysicalDeviceDriverProperties old_driver;
                    old_props.pNext = &old_driver;
                    devices[*old_device].getProperties2(&old_props);

                    std::map<vk::DriverId, int> driver_priorities {};
                    int old_priority = std::numeric_limits<int>::max();
                    int new_priority = std::numeric_limits<int>::max();

                    // Check https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDriverId.html for the list of driver id
                    // Smaller number -> higher priority
                    switch (old_props.properties.vendorID) {
                        case VK_VENDOR_ID_AMD:
                            driver_priorities[vk::DriverId::eMesaRadv] = 1;
                            driver_priorities[vk::DriverId::eAmdOpenSource] = 2;
                            driver_priorities[vk::DriverId::eAmdProprietary] = 3;
                            break;
                        case VK_VENDOR_ID_INTEL:
                            driver_priorities[vk::DriverId::eIntelOpenSourceMESA] = 1;
                            driver_priorities[vk::DriverId::eIntelProprietaryWindows] = 2;
                            break;
                        case VK_VENDOR_ID_NVIDIA:
                            driver_priorities[vk::DriverId::eNvidiaProprietary] = 1;
#if defined(VK_API_VERSION_1_3) && VK_HEADER_VERSION >= 235
                            driver_priorities[vk::DriverId::eMesaNvk] = 2;
#endif
                            break;
                    }
                    driver_priorities[vk::DriverId::eMesaDozen] = 100;
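                    // Example: a GPU exposed by both RADV (priority 1) and the AMD
                    // proprietary driver (priority 3) keeps only the RADV entry.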
                    if (driver_priorities.count(old_driver.driverID)) {
                        old_priority = driver_priorities[old_driver.driverID];
                    }
                    if (driver_priorities.count(new_driver.driverID)) {
                        new_priority = driver_priorities[new_driver.driverID];
                    }

                    if (new_priority < old_priority) {
                        auto r = std::remove(vk_instance.device_indices.begin(), vk_instance.device_indices.end(), *old_device);
                        vk_instance.device_indices.erase(r, vk_instance.device_indices.end());
                        vk_instance.device_indices.push_back(i);

                        VK_LOG_DEBUG("Prioritize device " << i << " driver " << new_driver.driverName << " over device " << *old_device << " driver " << old_driver.driverName);
                    } else {
                        VK_LOG_DEBUG("Prioritize device " << *old_device << " driver " << old_driver.driverName << " over device " << i << " driver " << new_driver.driverName << std::endl);
                    }
                }
            }
        }

        // If no GPUs found, fall back to the first non-CPU device.
        // If only CPU devices are available, return without devices.
        if (vk_instance.device_indices.empty()) {
            for (size_t i = 0; i < devices.size(); i++) {
                if (devices[i].getProperties().deviceType != vk::PhysicalDeviceType::eCpu) {
                    vk_instance.device_indices.push_back(i);
                    break;
                }
            }
        }

        if (vk_instance.device_indices.empty()) {
            GGML_LOG_INFO("ggml_vulkan: No devices found.\n");
            return;
        }
    }

    GGML_LOG_DEBUG("ggml_vulkan: Found %zu Vulkan devices:\n", vk_instance.device_indices.size());

    for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
        vk::PhysicalDevice vkdev = devices[vk_instance.device_indices[i]];
        std::vector<vk::ExtensionProperties> extensionprops = vkdev.enumerateDeviceExtensionProperties();

        bool membudget_supported = false;
        for (const auto & ext : extensionprops) {
            if (strcmp(VK_EXT_MEMORY_BUDGET_EXTENSION_NAME, ext.extensionName) == 0) {
                membudget_supported = true;
                break;
            }
        }

        vk_instance.device_supports_membudget.push_back(membudget_supported);

        ggml_vk_print_gpu_info(i);
    }
}

static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
    VK_LOG_DEBUG("ggml_vk_init(" << ctx->name << ", " << idx << ")");
    ggml_vk_instance_init();

    GGML_ASSERT(idx < vk_instance.device_indices.size());
    ctx->name = GGML_VK_NAME + std::to_string(idx);
    ctx->device = ggml_vk_get_device(idx);

    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;

    ctx->fence = ctx->device->device.createFence({});
    ctx->almost_ready_fence = ctx->device->device.createFence({});

    ctx->compute_cmd_pool.init(ctx->device, &ctx->device->compute_queue);
    ctx->transfer_cmd_pool.init(ctx->device, &ctx->device->transfer_queue);

#ifdef GGML_VULKAN_CHECK_RESULTS
    const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
    vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
    const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
    vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
#endif
}

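// Returns the dequantization pipeline that converts the given type to fp16,
// or nullptr if the type has no dequant shader.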
static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
    VK_LOG_DEBUG("ggml_vk_get_to_fp16()");
    switch (type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant[type];
}

static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline(" << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ", " << prec << ")");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f32;
    }
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f32_f16;
    }
    if (src0_type == GGML_TYPE_BF16 && src1_type == GGML_TYPE_BF16) {
        return ctx->device->pipeline_matmul_bf16;
    }
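    // Use fp16 accumulators only when the device does fp16 math and, if KHR_coopmat
    // is in use, also supports fp16 accumulation; otherwise fall back to fp32.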
    if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_f16_f32.f16acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_f16.f16acc;
        }
    } else {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_f16_f32.f32acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_f16.f32acc;
        }
    }

    // MMQ
    if (src1_type == GGML_TYPE_Q8_1) {
        vk_matmul_pipeline pipelines = ctx->device->pipeline_dequant_mul_mat_mat_q8_1[src0_type].f32acc;

        if (pipelines->is_empty()) {
            return nullptr;
        }

        return pipelines;
    }

    if (src1_type != GGML_TYPE_F32 && !ctx->device->coopmat2) {
        return nullptr;
    }

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    if (ctx->device->coopmat2) {
        assert(src1_type == GGML_TYPE_F16);
        return prec == GGML_PREC_DEFAULT ? ctx->device->pipeline_dequant_mul_mat_mat_f16[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat_f16[src0_type].f32acc;
    }
    if (ctx->device->coopmat_support) {
        return (ctx->device->fp16 && ctx->device->coopmat_acc_f16_support && prec == GGML_PREC_DEFAULT) ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc;
    }
    return (ctx->device->fp16 && prec == GGML_PREC_DEFAULT) ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc;
}

static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type, uint32_t num_cols, uint32_t m, uint32_t k) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()");
    GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16 || b_type == GGML_TYPE_Q8_1);
    GGML_ASSERT(num_cols >= 1 && num_cols <= mul_mat_vec_max_cols);

    if (b_type == GGML_TYPE_Q8_1) {
        switch (a_type) {
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
                break;
            default:
                return nullptr;
        }
    }

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    // heuristic to choose workgroup size
    uint32_t dmmv_wg = DMMV_WG_SIZE_SUBGROUP;
    if ((ctx->device->vendor_id == VK_VENDOR_ID_NVIDIA && ctx->device->architecture != vk_device_architecture::NVIDIA_PRE_TURING) || ctx->device->vendor_id == VK_VENDOR_ID_INTEL) {
        // Prefer larger workgroups when M is small, to spread the work out more
        // and keep more SMs busy.
        // q6_k seems to prefer small workgroup size even for "medium" values of M.
        if (a_type == GGML_TYPE_Q6_K) {
            if (m < 4096 && k >= 1024) {
                dmmv_wg = DMMV_WG_SIZE_LARGE;
            }
        } else {
            if (m <= 8192 && k >= 1024) {
                dmmv_wg = DMMV_WG_SIZE_LARGE;
            }
        }
    }

    if (b_type == GGML_TYPE_Q8_1) {
        if (ctx->device->vendor_id == VK_VENDOR_ID_INTEL) {
            dmmv_wg = DMMV_WG_SIZE_SUBGROUP;
        }
        return ctx->device->pipeline_dequant_mul_mat_vec_q8_1_f32[dmmv_wg][a_type][num_cols-1];
    }

    return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[dmmv_wg][a_type][num_cols-1] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[dmmv_wg][a_type][num_cols-1];
}

static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f32;
    }
    if (src0_type == GGML_TYPE_BF16 && src1_type == GGML_TYPE_BF16) {
        return ctx->device->pipeline_matmul_id_bf16;
    }
    if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_id_f16_f32.f16acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_id_f16.f16acc;
        }
    } else {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_id_f16_f32.f32acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_id_f16.f32acc;
        }
    }

    // MMQ
    if (src1_type == GGML_TYPE_Q8_1) {
        vk_matmul_pipeline pipelines = ctx->device->pipeline_dequant_mul_mat_mat_id_q8_1[src0_type].f32acc;

        if (pipelines->is_empty()) {
            return nullptr;
        }

        return pipelines;
    }

    GGML_ASSERT(src1_type == GGML_TYPE_F32 || (ctx->device->coopmat2 && src1_type == GGML_TYPE_F16));

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    vk_matmul_pipeline2& mmp = ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type];
    // XXX TODO 'prec' is not actually allowed in mul_mat_id.
    bool prefer_fp16acc = ctx->device->fp16 /*&& prec == GGML_PREC_DEFAULT*/;
    bool support_fp16acc = !mmp.f16acc->is_empty();
    bool support_fp32acc = !mmp.f32acc->is_empty();

    if (support_fp16acc && (prefer_fp16acc || !support_fp32acc)) {
        return mmp.f16acc;
    } else {
        GGML_ASSERT(support_fp32acc);
        return mmp.f32acc;
    }
}

static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec_id()");
    GGML_ASSERT(b_type == GGML_TYPE_F32);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_vec_id_f32[a_type];
}

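// Pinned (page-locked) host memory: allocations are backed by a host-visible
// Vulkan buffer and recorded in device->pinned_memory so ggml_vk_host_get()
// can later map an arbitrary host pointer back to its buffer and offset.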
static void * ggml_vk_host_malloc(vk_device& device, size_t size) {
    VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")");
    vk_buffer buf = ggml_vk_create_buffer(device, size,
        {vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
         vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});

    if (!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
            size/1024.0/1024.0);
        device->device.freeMemory(buf->device_memory);
        device->device.destroyBuffer(buf->buffer);
        return nullptr;
    }

    std::lock_guard<std::recursive_mutex> guard(device->mutex);
    device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));

    return buf->ptr;
}

static void ggml_vk_host_free(vk_device& device, void* ptr) {
    if (ptr == nullptr) {
        return;
    }
    VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
    std::lock_guard<std::recursive_mutex> guard(device->mutex);

    vk_buffer buf;
    size_t index;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            index = i;
            break;
        }
    }
    if (buf == nullptr) {
        fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
        return;
    }

    ggml_vk_destroy_buffer(buf);

    device->pinned_memory.erase(device->pinned_memory.begin() + index);
}

static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
    std::lock_guard<std::recursive_mutex> guard(device->mutex);
    buf = nullptr;
    buf_offset = 0;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            buf_offset = ((const uint8_t *)ptr) - addr;
            break;
        }
    }
}

static vk_submission ggml_vk_begin_submission(vk_device& device, vk_command_pool& p, bool one_time = true) {
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(device, p);
    if (one_time) {
        s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
    } else {
        s.buffer.begin({ vk::CommandBufferUsageFlags{} });
    }

    return s;
}

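// Overloads that compute the size and data pointer of a push constant argument,
// whether it is passed as a plain struct, a std::vector, or a std::array.
// ggml_vk_dispatch_pipeline() below uses them to stay agnostic of the type.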
template <typename T> size_t push_constant_size(const T &t) {
    static_assert(std::is_class<T>::value, "T must be a struct/class");
    GGML_UNUSED(t);
    return sizeof(T);
}
template <typename T> size_t push_constant_size(const std::vector<T> &t) {
    GGML_UNUSED(t);
    return sizeof(T) * t.size();
}
template <typename T, uint32_t N> size_t push_constant_size(const std::array<T, N> &t) {
    GGML_UNUSED(t);
    return sizeof(T) * N;
}

template <typename T> const T *push_constant_data(const T &t) {
    static_assert(std::is_class<T>::value, "T must be a struct/class");
    return &t;
}
template <typename T> const T *push_constant_data(const std::vector<T> &t) {
    return t.data();
}
template <typename T, uint32_t N> const T *push_constant_data(const std::array<T, N> &t) {
    return t.data();
}

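// Records one compute dispatch: the workgroup count in each dimension is the
// element count divided (rounded up) by the pipeline's wg_denoms, and every
// dispatch consumes one pre-allocated descriptor set from the context.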
template <typename T>
static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& subctx, vk_pipeline& pipeline, std::initializer_list<vk::DescriptorBufferInfo> const& descriptor_buffer_infos, const T &push_constants, std::array<uint32_t, 3> elements) {
    const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
    const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
    const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
    VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
    for (auto& buffer : descriptor_buffer_infos) {
        std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.range << "), ";
    }
    std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
    GGML_ASSERT(ctx->descriptor_set_idx < ctx->descriptor_sets.size());
    GGML_ASSERT(descriptor_buffer_infos.size() <= MAX_PARAMETER_COUNT);
    GGML_ASSERT(pipeline->parameter_count == descriptor_buffer_infos.size());

    vk::DescriptorSet& descriptor_set = ctx->descriptor_sets[ctx->descriptor_set_idx++];
    vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() };
    ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {});

    subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size(push_constants), push_constant_data(push_constants));
    subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
    subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
                                         pipeline->layout,
                                         0,
                                         { descriptor_set },
                                         {});
    subctx->s->buffer.dispatch(wg0, wg1, wg2);
}

static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    s.buffer.end();

    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
}

static void ggml_vk_ctx_end(vk_context& ctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")");
    if (ctx->s == nullptr) {
        return;
    }

    ctx->s->buffer.end();
    ctx->s = nullptr;
}

static void ggml_vk_ctx_begin(vk_device& device, vk_context& subctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_begin(" << device->name << ")");
    if (subctx->s != nullptr) {
        ggml_vk_ctx_end(subctx);
    }

    subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->p) });
    subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
}

static size_t ggml_vk_align_size(size_t width, size_t align) {
    VK_LOG_DEBUG("ggml_vk_align_size(" << width << ", " << align << ")");
    return CEIL_DIV(width, align) * align;
}

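// Host-side copies into staging memory can be deferred: when a memcpys/memsets
// list is passed, the operation is only recorded, and the caller replays the
// list before submitting the command buffer that consumes the staging data.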
static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
    if (memcpys == nullptr) {
        memcpy(dst, src, size);
    } else {
        memcpys->emplace_back(dst, src, size);
    }
}

static void deferred_memset(void * dst, uint32_t val, size_t size, std::vector<vk_staging_memset>* memsets = nullptr) {
    if (memsets == nullptr) {
        memset(dst, val, size);
    } else {
        memsets->emplace_back(dst, val, size);
    }
}

static void ggml_vk_ensure_sync_staging_buffer(vk_device& device, size_t size) {
    if (device->sync_staging == nullptr || device->sync_staging->size < size) {
        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
        ggml_vk_destroy_buffer(device->sync_staging);
        device->sync_staging = ggml_vk_create_buffer_check(device, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}

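// Write a non-contiguous tensor into a device buffer. If the source lies in
// pinned memory it is used directly as the staging area and one BufferCopy is
// emitted per contiguous run (slice, row, or element); otherwise the data is
// first gathered into the synchronous staging buffer via deferred memcpys.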
static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context& subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
    GGML_ASSERT(!ggml_is_contiguous(tensor));
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset = 0;
    ggml_vk_host_get(ctx->device, tensor->data, buf, buf_offset);

    const uint64_t ne0 = tensor->ne[0];
    const uint64_t ne1 = tensor->ne[1];
    const uint64_t ne2 = tensor->ne[2];
    const uint64_t ne3 = tensor->ne[3];
    const uint64_t nb0 = tensor->nb[0];
    const uint64_t nb1 = tensor->nb[1];
    const uint64_t nb2 = tensor->nb[2];
    const uint64_t nb3 = tensor->nb[3];
    const ggml_type type = tensor->type;
    const uint64_t ts = ggml_type_size(type);
    const uint64_t bs = ggml_blck_size(type);

    const uint64_t dstnb0 = ts;
    const uint64_t dstnb1 = dstnb0*(ne0/bs);
    const uint64_t dstnb2 = dstnb1*ne1;
    const uint64_t dstnb3 = dstnb2*ne2;

    const uint64_t ne = ggml_nelements(tensor);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices;

        for (uint64_t i3 = 0; i3 < ne3; i3++) {
            for (uint64_t i2 = 0; i2 < ne2; i2++) {
                // Find longest contiguous slice
                if (ne1*nb1 == dstnb2) {
                    slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
                } else {
                    for (uint64_t i1 = 0; i1 < ne1; i1++) {
                        if (ne0*nb0/bs == dstnb1) {
                            slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
                        } else {
                            const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                            const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                            for (uint64_t i0 = 0; i0 < ne0; i0++) {
                                slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 });
                            }
                        }
                    }
                }
            }
        }

        ggml_vk_sync_buffers(ctx, subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }

    if (!sync_staging) {
        GGML_ABORT("Asynchronous write to non-pinned memory not supported");
    }

    // Staging buffer required
    vk_buffer& staging = ctx->device->sync_staging;
    const uint64_t copy_size = ts*ne/bs;
    ggml_vk_ensure_sync_staging_buffer(ctx->device, copy_size);

    VkBufferCopy buf_copy{ 0, offset, copy_size };

    ggml_vk_sync_buffers(ctx, subctx);
    vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging->buffer, (VkBuffer)dst->buffer, 1, &buf_copy);

    for (uint64_t i3 = 0; i3 < ne3; i3++) {
        for (uint64_t i2 = 0; i2 < ne2; i2++) {
            // Find longest contiguous slice
            if (ne1*nb1 == dstnb2) {
                deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
            } else {
                for (uint64_t i1 = 0; i1 < ne1; i1++) {
                    if (ne0*nb0/bs == dstnb1) {
                        deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
                    } else {
                        const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                        const uint64_t d_off = i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                        for (uint64_t i0 = 0; i0 < ne0; i0++) {
                            deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
                        }
                    }
                }
            }
        }
    }
}

static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset = 0;
    ggml_vk_host_get(dst->device, src, buf, buf_offset);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices(1);
        if (width == spitch) {
            // Only do single write if stride is equal
            slices[0].srcOffset = buf_offset;
            slices[0].dstOffset = offset;
            slices[0].size = width * height;
        } else {
            slices.resize(height);
            for (size_t i = 0; i < height; i++) {
                slices[i].srcOffset = buf_offset + i * spitch;
                slices[i].dstOffset = offset + i * width;
                slices[i].size = width;
            }
        }

        ggml_vk_sync_buffers(nullptr, subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    if (!sync_staging) {
        GGML_ABORT("Asynchronous write to non-pinned memory not supported");
    }

    // Staging buffer required
    const size_t copy_size = width*height;
    ggml_vk_ensure_sync_staging_buffer(dst->device, copy_size);

    vk_buffer& staging_buffer = dst->device->sync_staging;

    VkBufferCopy buf_copy = {
        0,
        offset,
        copy_size};

    ggml_vk_sync_buffers(nullptr, subctx);
    vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging_buffer->buffer, (VkBuffer)dst->buffer, 1, &buf_copy);

    if (width == spitch) {
        deferred_memcpy((uint8_t *)staging_buffer->ptr, src, width * height, &subctx->in_memcpys);
    } else {
        for (size_t i = 0; i < height; i++) {
            deferred_memcpy((uint8_t *)staging_buffer->ptr + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
        }
    }
}

  4884. static void ggml_vk_buffer_write_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, bool sync_staging = false) {
  4885. VK_LOG_DEBUG("ggml_vk_buffer_write_async(" << size << ")");
  4886. return ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, size, size, 1, sync_staging);
  4887. }
  4888. static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
  4889. VK_LOG_DEBUG("ggml_vk_buffer_write_2d(" << width << ", " << height << ")");
  4890. // Buffer is already mapped
  4891. if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
  4892. GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);
  4893. for (size_t i = 0; i < height; i++) {
  4894. memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
  4895. }
  4896. } else {
  4897. std::lock_guard<std::recursive_mutex> guard(dst->device->mutex);
  4898. vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool);
  4899. ggml_vk_ctx_begin(dst->device, subctx);
  4900. ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, spitch, width, height, true);
  4901. ggml_vk_ctx_end(subctx);
  4902. for (auto& cpy : subctx->in_memcpys) {
  4903. memcpy(cpy.dst, cpy.src, cpy.n);
  4904. }
  4905. for (auto& mset : subctx->memsets) {
  4906. memset(mset.dst, mset.val, mset.n);
  4907. }
  4908. ggml_vk_submit(subctx, dst->device->fence);
  4909. VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
  4910. dst->device->device.resetFences({ dst->device->fence });
  4911. ggml_vk_queue_command_pools_cleanup(dst->device);
  4912. }
  4913. }
  4914. static void ggml_vk_buffer_write(vk_buffer& dst, size_t offset, const void * src, size_t size) {
  4915. VK_LOG_DEBUG("ggml_vk_buffer_write(" << size << ")");
  4916. ggml_vk_buffer_write_2d(dst, offset, src, 0, size, 1);
  4917. }
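// Example usage of the blocking write helper (a sketch; dev_buf is assumed to
// be a valid device-local vk_buffer created elsewhere):
//
//   std::vector<float> host(1024, 1.0f);
//   ggml_vk_buffer_write(dev_buf, 0, host.data(), host.size() * sizeof(float));
//
// Host-visible destinations are written with a plain memcpy; everything else
// goes through the staging path and waits on the transfer fence.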
  4918. static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
  4919. VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
  4920. GGML_ASSERT(width > 0);
  4921. GGML_ASSERT(height > 0);
  4922. GGML_ASSERT(src != nullptr);
// Check if dst is pinned memory
  4925. vk_buffer buf = nullptr;
  4926. size_t buf_offset = 0;
  4927. ggml_vk_host_get(src->device, dst, buf, buf_offset);
  4928. std::vector<vk::BufferCopy> slices(1);
  4929. if (width == spitch && width == dpitch) {
  4930. // Only do single write if stride is equal
  4931. slices[0].srcOffset = offset;
  4932. slices[0].dstOffset = buf_offset;
  4933. slices[0].size = width * height;
  4934. } else {
  4935. slices.resize(height);
  4936. for (size_t i = 0; i < height; i++) {
  4937. slices[i].srcOffset = offset + i * spitch;
  4938. slices[i].dstOffset = buf_offset + i * dpitch;
  4939. slices[i].size = width;
  4940. }
  4941. }
  4942. if (buf != nullptr) {
  4943. // Memory is pinned, use as staging buffer
  4944. ggml_vk_sync_buffers(nullptr, subctx);
  4945. subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
  4946. return;
  4947. }
  4948. VK_LOG_DEBUG("STAGING");
  4949. if (!sync_staging) {
  4950. GGML_ABORT("Asynchronous read from non-pinned memory not supported");
  4951. }
  4952. // Fall back to staging buffer
  4953. const size_t copy_size = dpitch * height;
  4954. ggml_vk_ensure_sync_staging_buffer(src->device, copy_size);
  4955. vk_buffer& staging_buffer = src->device->sync_staging;
  4956. ggml_vk_sync_buffers(nullptr, subctx);
  4957. subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices);
  4958. deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys);
  4959. }
  4960. static void ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
  4961. return ggml_vk_buffer_read_2d_async(subctx, src, offset, dst, size, size, size, 1, sync_staging);
  4962. }
  4963. static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_t size) {
  4964. VK_LOG_DEBUG("ggml_vk_buffer_read(" << src->buffer << ", " << offset << ", " << size << ")");
// If the device is not a UMA device, the memory is host-accessible through ReBAR.
// While writing through PCIe is sufficiently fast, reading data back over PCIe is
// slower than going through the hardware device-to-host copy path.
  4968. if(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible && src->device->uma) {
  4969. GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);
  4970. memcpy(dst, (uint8_t *) src->ptr + offset, size);
  4971. } else {
  4972. std::lock_guard<std::recursive_mutex> guard(src->device->mutex);
  4973. vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
  4974. ggml_vk_ctx_begin(src->device, subctx);
  4975. ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true);
  4976. ggml_vk_ctx_end(subctx);
  4977. ggml_vk_submit(subctx, src->device->fence);
  4978. VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
  4979. src->device->device.resetFences({ src->device->fence });
  4980. ggml_vk_queue_command_pools_cleanup(src->device);
  4981. for (auto& cpy : subctx->out_memcpys) {
  4982. memcpy(cpy.dst, cpy.src, cpy.n);
  4983. }
  4984. }
  4985. }
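// Matching read-back sketch (dev_buf is again a hypothetical device buffer):
//
//   std::vector<float> out(1024);
//   ggml_vk_buffer_read(dev_buf, 0, out.data(), out.size() * sizeof(float));
//
// Note the asymmetry with the write path: the memcpy shortcut is only taken on
// UMA devices, for the ReBAR read-back reason described above.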
  4986. static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
  4987. VK_LOG_DEBUG("ggml_vk_buffer_copy_async(" << size << ")");
  4988. // Make sure both buffers are on same device
  4989. GGML_ASSERT(src->device == dst->device);
  4990. VkBufferCopy bc{ src_offset, dst_offset, size };
  4991. vkCmdCopyBuffer(ctx->s->buffer, (VkBuffer)src->buffer, (VkBuffer)dst->buffer, 1, &bc);
  4992. }
  4993. static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
  4994. if (src->device == dst->device) {
  4995. std::lock_guard<std::recursive_mutex> guard(src->device->mutex);
  4996. VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
  4997. // Copy within the device
  4998. vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
  4999. ggml_vk_ctx_begin(src->device, subctx);
  5000. ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
  5001. ggml_vk_ctx_end(subctx);
  5002. ggml_vk_submit(subctx, src->device->fence);
  5003. VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
  5004. src->device->device.resetFences({ src->device->fence });
  5005. ggml_vk_queue_command_pools_cleanup(src->device);
  5006. } else {
  5007. VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")");
  5008. // Copy device to device
  5009. ggml_vk_ensure_sync_staging_buffer(src->device, size);
  5010. // Copy to src staging buffer
  5011. ggml_vk_buffer_copy(src->device->sync_staging, 0, src, src_offset, size);
  5012. // Copy to dst buffer
  5013. ggml_vk_buffer_write_2d(dst, dst_offset, src->device->sync_staging->ptr, 0, size, 1);
  5014. }
  5015. }
  5016. static void ggml_vk_buffer_memset_async(vk_context& ctx, vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
  5017. VK_LOG_DEBUG("ggml_vk_buffer_memset_async(" << offset << ", " << c << ", " << size << ")");
  5018. if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible &&
  5019. dst->device->uma) {
  5020. deferred_memset((uint8_t*)dst->ptr + offset, c, size, &ctx->memsets);
  5021. return;
  5022. }
  5023. // Fall back to GPU fillBuffer for non-UMA or non-host-visible buffers
  5024. ctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
  5025. }
  5026. static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
  5027. VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");
  5028. if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible &&
  5029. dst->device->uma) {
  5030. memset((uint8_t*)dst->ptr + offset, c, size);
  5031. return;
  5032. }
  5033. std::lock_guard<std::recursive_mutex> guard(dst->device->mutex);
  5034. vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool);
  5035. ggml_vk_ctx_begin(dst->device, subctx);
  5036. subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
  5037. ggml_vk_ctx_end(subctx);
  5038. ggml_vk_submit(subctx, dst->device->fence);
  5039. VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_memset waitForFences");
  5040. dst->device->device.resetFences({ dst->device->fence });
  5041. ggml_vk_queue_command_pools_cleanup(dst->device);
  5042. }
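// Note on fill semantics: the UMA shortcut uses memset, which replicates the
// low byte of c, while fillBuffer replicates the full 32-bit word. The two
// paths only agree for byte-uniform values, so zero fills are always safe:
//
//   ggml_vk_buffer_memset(dev_buf, 0, 0, 4096); // dev_buf is hypothetical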
  5043. static uint32_t ggml_vk_guess_split_k(ggml_backend_vk_context * ctx, uint32_t m, uint32_t n, uint32_t k, bool disable_split_k, const vk_pipeline& pipeline) {
  5044. VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ", " << disable_split_k << ")");
  5045. if (disable_split_k) {
  5046. return 1;
  5047. }
  5048. uint32_t split_k = 1;
  5049. if (ctx->device->shader_core_count != 0 && m >= pipeline->wg_denoms[0] && n >= pipeline->wg_denoms[1]) {
  5050. // If k is 'large' and the SMs will fill less than halfway, use split_k.
  5051. uint32_t m_tiles = CEIL_DIV(m, pipeline->wg_denoms[0]);
  5052. uint32_t n_tiles = CEIL_DIV(n, pipeline->wg_denoms[1]);
  5053. if (k >= 2048) {
  5054. if (m_tiles * n_tiles <= ctx->device->shader_core_count / 2) {
  5055. split_k = ctx->device->shader_core_count / (m_tiles * n_tiles);
  5056. } else if (m_tiles * n_tiles <= ctx->device->shader_core_count * 2 / 3) {
  5057. split_k = 3;
  5058. }
  5059. // Cap the split at 8x. Unless k is huge this is a lot of overhead.
  5060. split_k = std::min(split_k, 8u);
  5061. // ggml_vk_matmul will align the splits to be a multiple of 256.
  5062. // If this rounded up size would cause the last split to be empty,
  5063. // then reduce the split count.
  5064. while (true) {
  5065. if (split_k == 1) {
  5066. break;
  5067. }
  5068. uint32_t k_split = CEIL_DIV(k, split_k);
  5069. k_split = ROUNDUP_POW2(k_split, 256);
  5070. if (k_split * (split_k - 1) < k) {
  5071. break;
  5072. }
  5073. split_k--;
  5074. }
  5075. }
  5076. }
  5077. return split_k;
  5078. }
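// Worked example (hypothetical device with shader_core_count = 32 and
// wg_denoms = {64, 64}): for m = 64, n = 64, k = 4096 the tile grid is 1x1,
// 1 <= 32/2, so split_k = 32/1 = 32, capped to 8. Then
// k_split = ROUNDUP_POW2(CEIL_DIV(4096, 8), 256) = 512 and 512 * 7 = 3584 < 4096,
// so the last split is non-empty and split_k stays at 8.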
  5079. static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, uint32_t m, uint32_t n, bool aligned, ggml_type src0_type, ggml_type src1_type) {
  5080. VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");
  5081. if (ctx->device->coopmat2) {
  5082. const uint32_t shader_core_count = ctx->device->shader_core_count;
  5083. const uint32_t tiles_l = CEIL_DIV(m, mmp->a_l->wg_denoms[0]) * CEIL_DIV(n, mmp->a_l->wg_denoms[1]);
  5084. const uint32_t tiles_m = CEIL_DIV(m, mmp->a_m->wg_denoms[0]) * CEIL_DIV(n, mmp->a_m->wg_denoms[1]);
  5085. // Use large shader when the N dimension is greater than the medium shader's tile size
  5086. uint32_t crossover_large = mmp->m->wg_denoms[1];
  5087. // Prefer large over medium if either:
  5088. // - medium or large tiles would overfill the GPU
  5089. // - large tiles with a split_k==3 fits in the GPU and medium tiles with split_k==2 does not
  5090. // (medium with split_k==2 is probably better if it fits - more workgroups running and less split_k overhead)
  5091. bool prefer_large = tiles_m > shader_core_count || tiles_l > shader_core_count ||
  5092. // split_k==3 with large tiles likely better than medium tiles with no split_k.
  5093. (tiles_l <= shader_core_count / 3 && tiles_m > shader_core_count / 2);
  5094. if ((ctx->device->mul_mat_l[src0_type] && (n > crossover_large && prefer_large)) || (!ctx->device->mul_mat_m[src0_type] && !ctx->device->mul_mat_s[src0_type])) {
  5095. return aligned ? mmp->a_l : mmp->l;
  5096. }
  5097. // Use medium shader when the N dimension is greater than the small shader's tile size
  5098. uint32_t crossover_medium = mmp->s->wg_denoms[1];
  5099. if ((ctx->device->mul_mat_m[src0_type] && (n > crossover_medium)) || !ctx->device->mul_mat_s[src0_type]) {
  5100. return aligned ? mmp->a_m : mmp->m;
  5101. }
  5102. return aligned ? mmp->a_s : mmp->s;
  5103. }
  5104. if ((ctx->device->mul_mat_s[src0_type] && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_m[src0_type] && !ctx->device->mul_mat_l[src0_type])) {
  5105. return aligned ? mmp->a_s : mmp->s;
  5106. }
  5107. if ((ctx->device->mul_mat_m[src0_type] && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_l[src0_type]) {
  5108. return aligned ? mmp->a_m : mmp->m;
  5109. }
  5110. return aligned ? mmp->a_l : mmp->l;
  5111. GGML_UNUSED(src1_type);
  5112. }
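// Sketch of the coopmat2 selection above with made-up numbers: on a device
// with shader_core_count = 60, tiles_l = 18 and tiles_m = 35 give
// (tiles_l <= 60/3 && tiles_m > 60/2), so prefer_large is true and the large
// shader wins, provided n > crossover_large and mul_mat_l exists for the type.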
  5113. static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, ggml_type src0_type, ggml_type src1_type) {
  5114. VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ", " << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");
  5115. return ggml_vk_guess_matmul_pipeline(ctx, mmp, m, n, true, src0_type, src1_type)->align;
  5116. }
  5117. static void ggml_vk_matmul(
  5118. ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
  5119. vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer,
  5120. uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
  5121. uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
  5122. uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3,
  5123. uint32_t padded_n) {
  5124. VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ", padded_n: " << padded_n << ")");
  5125. if (split_k == 1) {
  5126. const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3, padded_n };
  5127. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, pc, { m, n, batch });
  5128. return;
  5129. }
  5130. if (ctx->prealloc_split_k_need_sync) {
  5131. ggml_vk_sync_buffers(ctx, subctx);
  5132. }
  5133. GGML_ASSERT(batch_stride_d == m * n);
  5134. // Round the split size up to a multiple of 256 (k-quant alignment)
  5135. uint32_t k_split = CEIL_DIV(k, split_k);
  5136. k_split = ROUNDUP_POW2(k_split, 256);
  5137. const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k_split, ne02, ne12, broadcast2, broadcast3, padded_n };
  5138. // Make sure enough workgroups get assigned for split k to work
  5139. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch });
  5140. ggml_vk_sync_buffers(ctx, subctx);
  5141. const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
  5142. ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2, { m * n * batch, 1, 1 });
  5143. ctx->prealloc_split_k_need_sync = true;
  5144. }
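// Continuing the split_k example above: with m = n = 64, batch = 1 and
// split_k = 8, the first dispatch is sized (CEIL_DIV(64, 64) * 64) * 8 = 512
// in x so that every k-slice gets its own set of workgroups, each writing a
// partial 64x64 result into split_k_buffer; the reduce pass then sums
// split_k = 8 partials for each of the m * n * batch = 4096 output elements.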
  5145. static vk_pipeline ggml_vk_guess_matmul_id_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, uint32_t m, uint32_t n, bool aligned, ggml_type src0_type) {
  5146. VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ")");
  5147. if (ctx->device->coopmat2) {
  5148. // Use large shader when the N dimension is greater than the medium shader's tile size
  5149. uint32_t crossover_large = mmp->m->wg_denoms[1];
  5150. if ((ctx->device->mul_mat_id_l[src0_type] && (n > crossover_large)) || (!ctx->device->mul_mat_id_m[src0_type] && !ctx->device->mul_mat_id_s[src0_type])) {
  5151. return aligned ? mmp->a_l : mmp->l;
  5152. }
  5153. // Use medium shader when the N dimension is greater than the small shader's tile size
  5154. uint32_t crossover_medium = mmp->s->wg_denoms[1];
  5155. if ((ctx->device->mul_mat_id_m[src0_type] && (n > crossover_medium)) || !ctx->device->mul_mat_id_s[src0_type]) {
  5156. return aligned ? mmp->a_m : mmp->m;
  5157. }
  5158. return aligned ? mmp->a_s : mmp->s;
  5159. }
  5160. if ((ctx->device->mul_mat_id_s[src0_type] && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_id_m[src0_type] && !ctx->device->mul_mat_id_l[src0_type])) {
  5161. return aligned ? mmp->a_s : mmp->s;
  5162. }
  5163. if ((ctx->device->mul_mat_id_m[src0_type] && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_id_l[src0_type]) {
  5164. return aligned ? mmp->a_m : mmp->m;
  5165. }
  5166. return aligned ? mmp->a_l : mmp->l;
  5167. }
  5168. static uint32_t ggml_vk_guess_matmul_id_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, ggml_type src0_type) {
VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline_align(" << m << ", " << n << ", " << ggml_type_name(src0_type) << ")");
  5170. return ggml_vk_guess_matmul_id_pipeline(ctx, mmp, m, n, true, src0_type)->align;
  5171. }
  5172. static void ggml_vk_matmul_id(
  5173. ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
  5174. vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
  5175. uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
  5176. uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
  5177. uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11,
  5178. uint32_t padded_n) {
  5179. VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " <<
  5180. "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " <<
  5181. "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " <<
  5182. "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")");
  5183. const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d,
  5184. nei0, nei1, nbi1, ne11, padded_n };
  5185. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, pc, { m, nei1, n_as });
  5186. }
  5187. static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
  5188. return
  5189. tensor->nb[0] == ggml_type_size(tensor->type) &&
  5190. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  5191. (tensor->ne[3] == 1 || tensor->nb[3] == tensor->nb[2]*tensor->ne[2]);
  5192. }
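// Example: a packed F32 tensor with ne = {8, 4, 1, 1} has nb = {4, 32, 128, 128}
// and passes this check, while its transpose view (nb = {32, 4, 128, 128})
// already fails nb[0] == ggml_type_size, so the matmul paths treat it as
// non-contiguous and run a copy-to-contiguous pass first.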
  5193. static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src, const ggml_tensor * dst, ggml_type to) {
  5194. // Choose "contiguous copy" shader if src/dst are contiguous
  5195. bool contig = ggml_is_contiguous(src) && (!dst || ggml_is_contiguous(dst));
  5196. if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
  5197. if (contig) {
  5198. return ctx->device->pipeline_contig_cpy_f32_f32;
  5199. } else {
  5200. return ctx->device->pipeline_cpy_f32_f32;
  5201. }
  5202. }
  5203. if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
  5204. if (contig) {
  5205. return ctx->device->pipeline_contig_cpy_f32_f16;
  5206. } else {
  5207. return ctx->device->pipeline_cpy_f32_f16;
  5208. }
  5209. }
  5210. if (src->type == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
  5211. if (contig) {
  5212. return ctx->device->pipeline_contig_cpy_f16_f16;
  5213. } else {
  5214. return ctx->device->pipeline_cpy_f16_f16;
  5215. }
  5216. }
  5217. if (src->type == GGML_TYPE_F16 && to == GGML_TYPE_F32) {
  5218. if (contig) {
  5219. return ctx->device->pipeline_contig_cpy_f16_f32;
  5220. } else {
  5221. return ctx->device->pipeline_cpy_f16_f32;
  5222. }
  5223. }
  5224. if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_BF16) {
  5225. if (contig) {
  5226. return ctx->device->pipeline_contig_cpy_f32_bf16;
  5227. } else {
  5228. return ctx->device->pipeline_cpy_f32_bf16;
  5229. }
  5230. }
  5231. if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_I32) {
  5232. if (contig) {
  5233. return ctx->device->pipeline_contig_cpy_f32_i32;
  5234. } else {
  5235. return ctx->device->pipeline_cpy_f32_i32;
  5236. }
  5237. }
  5238. if (src->type == GGML_TYPE_I32 && to == GGML_TYPE_F32) {
  5239. if (contig) {
  5240. return ctx->device->pipeline_contig_cpy_i32_f32;
  5241. } else {
  5242. return ctx->device->pipeline_cpy_i32_f32;
  5243. }
  5244. }
  5245. if (src->type == GGML_TYPE_F32) {
  5246. switch (to) {
  5247. case GGML_TYPE_Q4_0:
  5248. case GGML_TYPE_Q4_1:
  5249. case GGML_TYPE_Q5_0:
  5250. case GGML_TYPE_Q5_1:
  5251. case GGML_TYPE_Q8_0:
  5252. case GGML_TYPE_IQ4_NL:
  5253. return ctx->device->pipeline_cpy_f32_quant[to];
  5254. default:
  5255. break;
  5256. }
  5257. }
  5258. if (to == GGML_TYPE_F32) {
  5259. switch (src->type) {
  5260. case GGML_TYPE_Q4_0:
  5261. case GGML_TYPE_Q4_1:
  5262. case GGML_TYPE_Q5_0:
  5263. case GGML_TYPE_Q5_1:
  5264. case GGML_TYPE_Q8_0:
  5265. case GGML_TYPE_IQ4_NL:
  5266. return ctx->device->pipeline_cpy_quant_f32[src->type];
  5267. default:
  5268. break;
  5269. }
  5270. }
  5271. if (src->type == to) {
// Copy two or four bytes at a time, depending on the type size.
  5273. // For quantized types, we scale by block size/type size. But
  5274. // this path is also used for bf16->bf16 for example, where the
  5275. // type size must be exactly 2 or 4.
  5276. GGML_ASSERT(ggml_is_quantized(to) || ggml_type_size(src->type) == 2 || ggml_type_size(src->type) == 4);
  5277. if ((ggml_type_size(src->type) % 4) == 0) {
  5278. if (contig) {
  5279. return ctx->device->pipeline_contig_cpy_f32_f32;
  5280. } else {
  5281. return ctx->device->pipeline_cpy_f32_f32;
  5282. }
  5283. } else {
  5284. if (contig) {
  5285. return ctx->device->pipeline_contig_cpy_f16_f16;
  5286. } else {
  5287. return ctx->device->pipeline_cpy_f16_f16;
  5288. }
  5289. }
  5290. }
  5291. std::cerr << "Missing CPY op for types: " << ggml_type_name(src->type) << " " << ggml_type_name(to) << std::endl;
  5292. GGML_ABORT("fatal error");
  5293. }
  5294. static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out) {
  5295. VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
  5296. std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
  5297. const int tensor_type_size = ggml_type_size(tensor->type);
  5298. const uint32_t ne = ggml_nelements(tensor);
  5299. std::array<uint32_t, 3> elements;
  5300. if (ne > 262144) {
  5301. elements = { 512, 512, CEIL_DIV(ne, 262144) };
  5302. } else if (ne > 512) {
  5303. elements = { 512, CEIL_DIV(ne, 512), 1 };
  5304. } else {
  5305. elements = { ne, 1, 1 };
  5306. }
  5307. vk_op_unary_push_constants pc = {
  5308. (uint32_t)ne,
  5309. (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
  5310. (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1 , (uint32_t)tensor->ne[0] , (uint32_t)(tensor->ne[0] * tensor->ne[1]) , (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
  5311. 0,
  5312. 0.0f, 0.0f,
  5313. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  5314. };
  5315. init_pushconst_fastdiv(pc);
  5316. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, pc, elements);
  5317. ggml_vk_sync_buffers(ctx, subctx);
  5318. }
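// Dispatch-shape sketch: the element count is folded into a grid of at most
// 512 x 512 x z. For ne = 2000000 this gives
// elements = {512, 512, CEIL_DIV(2000000, 262144)} = {512, 512, 8}, i.e.
// 2097152 invocations, with the copy shader expected to bounds-check the
// flattened index against the element count in the push constants.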
  5319. static vk_pipeline ggml_vk_get_quantize_pipeline(ggml_backend_vk_context * ctx, ggml_type type, bool use_x4_blocks) {
  5320. switch(type) {
  5321. case GGML_TYPE_Q8_1:
  5322. return use_x4_blocks ? ctx->device->pipeline_quantize_q8_1_x4 : ctx->device->pipeline_quantize_q8_1;
  5323. default:
  5324. std::cerr << "Missing quantize pipeline for type: " << ggml_type_name(type) << std::endl;
  5325. GGML_ABORT("fatal error");
  5326. }
  5327. }
  5328. static void ggml_vk_quantize_q8_1(ggml_backend_vk_context * ctx, vk_context& subctx, vk_subbuffer&& in, vk_subbuffer&& out, uint32_t ne, bool use_x4_blocks = false) {
  5329. VK_LOG_DEBUG("ggml_vk_quantize_q8_1(" << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ", " << ne << ")");
vk_pipeline pipeline = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1, use_x4_blocks);
  5331. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, std::array<uint32_t, 1>{ne}, { ne, 1, 1 });
  5332. ggml_vk_sync_buffers(ctx, subctx);
  5333. }
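// Size arithmetic behind the Q8_1 buffers (from ggml's block layout): one
// block_q8_1 holds 32 int8 values plus two fp16 scales (d, s), i.e. 36 bytes,
// and the x4 variant packs 4 blocks into 144 bytes. This is why quantized Y
// preallocations below are rounded up with CEIL_DIV(y_sz, 144) * 144.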
  5334. static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool disable_split_k, bool dryrun = false) {
  5335. VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << ggml_type_name(src0->type) << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  5336. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << ggml_type_name(src1->type) << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  5337. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << ggml_type_name(dst->type) << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
  5338. std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
  5339. GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT
  5340. GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
  5341. const uint64_t ne00 = src0->ne[0];
  5342. const uint64_t ne01 = src0->ne[1];
  5343. const uint64_t ne02 = src0->ne[2];
  5344. const uint64_t ne03 = src0->ne[3];
  5345. const uint64_t ne10 = src1->ne[0];
  5346. const uint64_t ne11 = src1->ne[1];
  5347. const uint64_t ne12 = src1->ne[2];
  5348. const uint64_t ne13 = src1->ne[3];
  5349. const uint64_t ne21 = dst->ne[1];
  5350. const uint32_t stride_d = dst->nb[1] / ggml_type_size(dst->type);
  5351. const uint32_t stride_batch_d = stride_d*ne21;
  5352. const uint64_t r2 = ne12 / ne02;
  5353. const uint64_t r3 = ne13 / ne03;
  5354. ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
  5355. ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
  5356. ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
  5357. vk_buffer d_Qx = nullptr;
  5358. size_t qx_buf_offset = 0;
  5359. vk_buffer d_Qy = nullptr;
  5360. size_t qy_buf_offset = 0;
  5361. bool src0_uma = false;
  5362. bool src1_uma = false;
  5363. if (ctx->device->uma) {
  5364. ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
  5365. ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
  5366. src0_uma = d_Qx != nullptr;
  5367. src1_uma = d_Qy != nullptr;
  5368. }
// Reformat and convert to fp16 if non-contiguous, or when using coopmat2, for better perf
  5370. const bool x_non_contig = (ctx->device->coopmat2 && src0->type == GGML_TYPE_F32) ||
  5371. !ggml_vk_dim01_contiguous(src0);
  5372. const bool y_non_contig = (ctx->device->coopmat2 && src1->type == GGML_TYPE_F32) ||
  5373. (src0->type == GGML_TYPE_BF16 && src1->type != GGML_TYPE_BF16) ||
  5374. !ggml_vk_dim01_contiguous(src1);
  5375. // If src0 is BF16, try to use a BF16 x BF16 multiply
  5376. ggml_type f16_type = src0->type == GGML_TYPE_BF16 ? GGML_TYPE_BF16 : GGML_TYPE_F16;
  5377. const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
  5378. bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && (ne11 * ne10) % 4 == 0;
  5379. // Check for mmq first
  5380. vk_matmul_pipeline mmp = quantize_y ? ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, GGML_TYPE_Q8_1, (ggml_prec)dst->op_params[0]) : nullptr;
  5381. if (mmp == nullptr) {
  5382. // Fall back to f16 dequant mul mat
  5383. mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? f16_type : src1->type, (ggml_prec)dst->op_params[0]);
  5384. quantize_y = false;
  5385. }
  5386. const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
  5387. const bool qy_needs_dequant = !quantize_y && ((src1->type != f16_type && !y_f32_kernel) || y_non_contig);
  5388. if (qx_needs_dequant) {
  5389. // Fall back to dequant + f16 mulmat
  5390. mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, f16_type, y_f32_kernel ? GGML_TYPE_F32 : f16_type, (ggml_prec)dst->op_params[0]);
  5391. }
  5392. // Not implemented
  5393. GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
  5394. const uint32_t kpad = quantize_y ? 0 : ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11, qx_needs_dequant ? f16_type : src0->type, quantize_y ? GGML_TYPE_Q8_1 : (y_f32_kernel ? GGML_TYPE_F32 : src1->type)));
  5395. const bool aligned = !quantize_y && ne10 == kpad && ne01 > 8 && ne11 > 8;
  5396. vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned, qx_needs_dequant ? f16_type : src0->type, quantize_y ? GGML_TYPE_Q8_1 : (y_f32_kernel ? GGML_TYPE_F32 : src1->type));
  5397. // Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking
  5398. uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) : ne11;
  5399. const int x_ne = ne01 * ne00;
  5400. const int y_ne = padded_n * ne10;
  5401. const int d_ne = ne11 * ne01;
  5402. const uint32_t split_k = ggml_vk_guess_split_k(ctx, ne01, ne11, ne10, disable_split_k, pipeline);
  5403. const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
  5404. const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
  5405. const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
  5406. const uint64_t y_sz = quantize_y ? (y_ne * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) : (y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);
  5407. const uint64_t d_sz = sizeof(float) * d_ne;
  5408. vk_pipeline to_fp16_vk_0 = nullptr;
  5409. vk_pipeline to_fp16_vk_1 = nullptr;
  5410. vk_pipeline to_q8_1 = nullptr;
  5411. if (x_non_contig) {
  5412. to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, f16_type);
  5413. } else {
  5414. to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
  5415. }
  5416. if (y_non_contig) {
  5417. to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, f16_type);
  5418. } else {
  5419. to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
  5420. }
  5421. GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
  5422. GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
  5423. if (quantize_y) {
  5424. to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1, true);
  5425. }
  5426. if (dryrun) {
  5427. const uint64_t x_sz_upd = x_sz * ne02 * ne03;
  5428. uint64_t y_sz_upd = y_sz * ne12 * ne13;
  5429. if (quantize_y) {
  5430. y_sz_upd = CEIL_DIV(y_sz_upd, 144) * 144;
  5431. }
  5432. const uint64_t split_k_size = split_k > 1 ? d_sz * ne12 * ne13 * split_k : 0;
  5433. if (
  5434. (qx_needs_dequant && x_sz_upd > ctx->device->properties.limits.maxStorageBufferRange) ||
  5435. (qy_needs_dequant && y_sz_upd > ctx->device->properties.limits.maxStorageBufferRange) ||
  5436. (split_k > 1 && split_k_size > ctx->device->properties.limits.maxStorageBufferRange)) {
  5437. GGML_ABORT("Requested preallocation size is too large");
  5438. }
  5439. if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
  5440. ctx->prealloc_size_x = x_sz_upd;
  5441. }
  5442. if ((qy_needs_dequant || quantize_y) && ctx->prealloc_size_y < y_sz_upd) {
  5443. ctx->prealloc_size_y = y_sz_upd;
  5444. }
  5445. if (split_k > 1 && ctx->prealloc_size_split_k < split_k_size) {
  5446. ctx->prealloc_size_split_k = split_k_size;
  5447. }
  5448. // Request descriptor sets
  5449. ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
  5450. if (qx_needs_dequant) {
  5451. ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
  5452. }
  5453. if (qy_needs_dequant) {
  5454. ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
  5455. }
  5456. if (quantize_y) {
  5457. ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1);
  5458. }
  5459. if (split_k > 1) {
  5460. ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, 1);
  5461. }
  5462. return;
  5463. }
  5464. vk_buffer d_D = dst_buf_ctx->dev_buffer;
  5465. const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
  5466. GGML_ASSERT(d_D != nullptr);
  5467. GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
  5468. vk_buffer d_X;
  5469. uint64_t x_buf_offset = 0;
  5470. vk_buffer d_Y;
  5471. uint64_t y_buf_offset = 0;
  5472. if (!src0_uma) {
  5473. d_Qx = src0_buf_ctx->dev_buffer;
  5474. qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
  5475. GGML_ASSERT(d_Qx != nullptr);
  5476. }
  5477. if (!src1_uma) {
  5478. d_Qy = src1_buf_ctx->dev_buffer;
  5479. qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
  5480. GGML_ASSERT(d_Qy != nullptr);
  5481. }
  5482. if (qx_needs_dequant) {
  5483. d_X = ctx->prealloc_x;
  5484. GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
  5485. } else {
  5486. d_X = d_Qx;
  5487. x_buf_offset = qx_buf_offset;
  5488. GGML_ASSERT(qx_sz == x_sz);
  5489. }
  5490. if (qy_needs_dequant) {
  5491. d_Y = ctx->prealloc_y;
  5492. GGML_ASSERT(d_Y->size >= y_sz * ne12 * ne13);
  5493. } else if (quantize_y) {
  5494. d_Y = ctx->prealloc_y;
  5495. GGML_ASSERT(d_Y->size >= CEIL_DIV(y_sz * ne12 * ne13, 144) * 144);
  5496. } else {
  5497. d_Y = d_Qy;
  5498. y_buf_offset = qy_buf_offset;
  5499. GGML_ASSERT(qy_sz == y_sz);
  5500. }
  5501. if (x_non_contig || qx_needs_dequant) {
  5502. if (ctx->prealloc_x_need_sync) {
  5503. ggml_vk_sync_buffers(ctx, subctx);
  5504. }
  5505. }
  5506. if (x_non_contig) {
  5507. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
  5508. } else if (qx_needs_dequant) {
  5509. const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
  5510. ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc, { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
  5511. ggml_vk_sync_buffers(ctx, subctx);
  5512. }
  5513. if (y_non_contig) {
  5514. if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
  5515. ctx->prealloc_y_last_tensor_used != src1) {
  5516. if (ctx->prealloc_y_need_sync) {
  5517. ggml_vk_sync_buffers(ctx, subctx);
  5518. }
  5519. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
  5520. ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
  5521. ctx->prealloc_y_last_tensor_used = src1;
  5522. }
  5523. }
  5524. if (quantize_y) {
  5525. if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
  5526. ctx->prealloc_y_last_tensor_used != src1) {
  5527. if (ctx->prealloc_y_need_sync) {
  5528. ggml_vk_sync_buffers(ctx, subctx);
  5529. }
  5530. ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0), y_ne * ne12 * ne13, true);
  5531. ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
  5532. ctx->prealloc_y_last_tensor_used = src1;
  5533. }
  5534. }
  5535. uint32_t stride_batch_x = ne00*ne01;
  5536. uint32_t stride_batch_y = ne10*ne11;
  5537. if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
  5538. stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
  5539. }
  5540. if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant && !quantize_y) {
  5541. stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
  5542. }
  5543. uint32_t y_sz_total = y_sz * ne12 * ne13;
  5544. if (quantize_y) {
  5545. y_sz_total = CEIL_DIV(y_sz_total, 144) * 144;
  5546. }
  5547. // compute
  5548. ggml_vk_matmul(
  5549. ctx, subctx, pipeline,
  5550. { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz_total },
  5551. ggml_vk_subbuffer(ctx, d_D, d_buf_offset), { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k },
  5552. ne01, ne11, ne10,
  5553. ne10, ne10, stride_d, stride_batch_x, stride_batch_y, stride_batch_d,
  5554. split_k, ne12*ne13, ne02, ne12, r2, r3, padded_n
  5555. ); // NOLINT
  5556. if (x_non_contig || qx_needs_dequant) {
  5557. ctx->prealloc_x_need_sync = true;
  5558. }
  5559. if (y_non_contig || quantize_y) {
  5560. ctx->prealloc_y_need_sync = true;
  5561. }
  5562. }
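// Note on the dryrun flag used by the mat-mul paths: the graph is expected to
// be walked twice. A first pass with dryrun = true only grows the prealloc_*
// sizes and requests descriptor sets; the second pass records the actual
// dispatches. A minimal sketch of the calling pattern:
//
//   ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, false, true);  // size pass
//   // ... allocate/grow ctx->prealloc_x / prealloc_y / prealloc_split_k ...
//   ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, false, false); // record pass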
  5563. // Device tuning
  5564. static bool ggml_vk_should_use_mmvq(const vk_device& device, uint32_t m, uint32_t n, uint32_t k, ggml_type src0_type) {
  5565. if (device->mmvq_mode == 1) {
  5566. return true;
  5567. } else if (device->mmvq_mode == -1) {
  5568. return false;
  5569. }
  5570. // MMVQ is generally good for batches
  5571. if (n > 1) {
  5572. return true;
  5573. }
  5574. switch (device->vendor_id) {
  5575. case VK_VENDOR_ID_NVIDIA:
  5576. switch (src0_type) {
  5577. case GGML_TYPE_Q8_0:
  5578. return device->architecture == vk_device_architecture::NVIDIA_PRE_TURING;
  5579. default:
  5580. return true;
  5581. }
  5582. case VK_VENDOR_ID_AMD:
  5583. switch (src0_type) {
  5584. case GGML_TYPE_Q8_0:
  5585. return device->architecture == vk_device_architecture::AMD_GCN;
  5586. default:
  5587. return true;
  5588. }
  5589. case VK_VENDOR_ID_INTEL:
  5590. switch (src0_type) {
  5591. // From tests on A770 Linux, may need more tuning
  5592. case GGML_TYPE_Q4_0:
  5593. case GGML_TYPE_Q5_1:
  5594. return false;
  5595. default:
  5596. return true;
  5597. }
  5598. default:
  5599. return true;
  5600. }
  5601. GGML_UNUSED(m);
  5602. GGML_UNUSED(k);
  5603. }
  5604. static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
  5605. ggml_tensor * dst = cgraph->nodes[node_idx];
  5606. const ggml_tensor * src0 = dst->src[0];
  5607. const ggml_tensor * src1 = dst->src[1];
  5608. VK_LOG_DEBUG("ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  5609. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  5610. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
  5612. GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT
  5613. GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
  5614. const uint64_t ne00 = src0->ne[0];
  5615. const uint64_t ne01 = src0->ne[1];
  5616. const uint64_t ne02 = src0->ne[2];
  5617. const uint64_t ne03 = src0->ne[3];
  5618. const uint64_t ne10 = src1->ne[0];
  5619. const uint64_t ne11 = src1->ne[1];
  5620. const uint64_t ne12 = src1->ne[2];
  5621. const uint64_t ne13 = src1->ne[3];
  5622. const uint64_t ne20 = dst->ne[0];
  5623. const uint64_t ne21 = dst->ne[1];
  5624. const uint64_t ne22 = dst->ne[2];
  5625. const uint64_t ne23 = dst->ne[3];
  5626. const uint64_t r2 = ne12 / ne02;
  5627. const uint64_t r3 = ne13 / ne03;
  5628. // batch_n indicates that we need to compute a few vector results, and this assumes
  5629. // ne12 and ne13 are 1. It overloads the batch_strides to hold the row strides.
  5630. GGML_ASSERT(ne11 == 1 || ne12 * ne13 == 1);
  5631. bool batch_n = ne11 > 1;
  5632. ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
  5633. ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
  5634. vk_buffer d_Qx = nullptr;
  5635. size_t qx_buf_offset = 0;
  5636. vk_buffer d_Qy = nullptr;
  5637. size_t qy_buf_offset = 0;
  5638. bool src0_uma = false;
  5639. bool src1_uma = false;
  5640. if (ctx->device->uma) {
  5641. ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
  5642. ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
  5643. src0_uma = d_Qx != nullptr;
  5644. src1_uma = d_Qy != nullptr;
  5645. }
  5646. const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
  5647. const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
  5648. const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;
  5649. bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && (ne11 * ne10) % 4 == 0 && ggml_vk_should_use_mmvq(ctx->device, ne01, ne11, ne10, src0->type);
  5650. vk_pipeline to_fp16_vk_0 = nullptr;
  5651. vk_pipeline to_fp16_vk_1 = nullptr;
  5652. if (x_non_contig) {
  5653. to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, src0->type);
  5654. }
  5655. if (y_non_contig) {
  5656. to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, src1->type);
  5657. } else {
  5658. to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
  5659. }
  5660. // Check for mmq first
  5661. vk_pipeline dmmv = quantize_y ? ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, GGML_TYPE_Q8_1, ne11, ne20, ne00) : nullptr;
  5662. vk_pipeline to_q8_1 = nullptr;
  5663. if (dmmv == nullptr) {
  5664. // Fall back to f16 dequant mul mat
  5665. dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type, ne11, ne20, ne00);
  5666. quantize_y = false;
  5667. }
  5668. if (quantize_y) {
  5669. to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1, true);
  5670. }
  5671. const bool qx_needs_dequant = x_non_contig;
  5672. const bool qy_needs_dequant = !quantize_y && ((src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig);
  5673. // Not implemented
  5674. GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
  5675. GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
  5676. GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
  5677. GGML_ASSERT(dmmv != nullptr);
  5678. const uint64_t x_ne = ne01 * ne00;
  5679. const uint64_t y_ne = ne11 * ne10;
  5680. const uint64_t d_ne = ne11 * ne01;
  5681. const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
  5682. const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
  5683. const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
  5684. const uint64_t y_sz = quantize_y ? (y_ne * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) : (f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);
  5685. const uint64_t d_sz = sizeof(float) * d_ne;
  5686. if (dryrun) {
  5687. const uint64_t x_sz_upd = x_sz * ne02 * ne03;
  5688. uint64_t y_sz_upd = y_sz * ne12 * ne13;
  5689. if (quantize_y) {
  5690. y_sz_upd = CEIL_DIV(y_sz_upd, 144) * 144;
  5691. }
  5692. if (
  5693. (qx_needs_dequant && x_sz_upd > ctx->device->properties.limits.maxStorageBufferRange) ||
  5694. (qy_needs_dequant && y_sz_upd > ctx->device->properties.limits.maxStorageBufferRange)) {
  5695. GGML_ABORT("Requested preallocation size is too large");
  5696. }
  5697. if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
  5698. ctx->prealloc_size_x = x_sz_upd;
  5699. }
  5700. if ((qy_needs_dequant || quantize_y) && ctx->prealloc_size_y < y_sz_upd) {
  5701. ctx->prealloc_size_y = y_sz_upd;
  5702. }
  5703. // Request descriptor sets
  5704. if (qx_needs_dequant) {
  5705. ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
  5706. }
  5707. if (qy_needs_dequant) {
  5708. ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
  5709. }
  5710. if (quantize_y) {
  5711. ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1);
  5712. }
  5713. ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1);
  5714. return;
  5715. }
  5716. vk_buffer d_D;
  5717. uint64_t d_buf_offset = 0;
  5718. if (ctx->num_additional_fused_ops > 0) {
  5719. const ggml_tensor * add = cgraph->nodes[node_idx + 1];
  5720. ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
  5721. d_D = dst_buf_ctx->dev_buffer;
  5722. d_buf_offset = vk_tensor_offset(add) + add->view_offs;
  5723. } else {
  5724. ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
  5725. d_D = dst_buf_ctx->dev_buffer;
  5726. d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
  5727. }
  5728. GGML_ASSERT(d_D != nullptr);
  5729. vk_buffer d_X;
  5730. uint64_t x_buf_offset = 0;
  5731. vk_buffer d_Y;
  5732. uint64_t y_buf_offset = 0;
  5733. if(!src0_uma) {
  5734. d_Qx = src0_buf_ctx->dev_buffer;
  5735. qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
  5736. GGML_ASSERT(d_Qx != nullptr);
  5737. }
  5738. if(!src1_uma) {
  5739. d_Qy = src1_buf_ctx->dev_buffer;
  5740. qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
  5741. GGML_ASSERT(d_Qy != nullptr);
  5742. }
  5743. if (qx_needs_dequant) {
  5744. d_X = ctx->prealloc_x;
  5745. } else {
  5746. d_X = d_Qx;
  5747. x_buf_offset = qx_buf_offset;
  5748. GGML_ASSERT(qx_sz == x_sz);
  5749. }
  5750. if (qy_needs_dequant) {
  5751. d_Y = ctx->prealloc_y;
  5752. } else if (quantize_y) {
  5753. d_Y = ctx->prealloc_y;
  5754. GGML_ASSERT(d_Y->size >= CEIL_DIV(y_sz * ne12 * ne13, 144) * 144);
  5755. } else {
  5756. d_Y = d_Qy;
  5757. y_buf_offset = qy_buf_offset;
  5758. GGML_ASSERT(qy_sz == y_sz);
  5759. }
  5760. if (x_non_contig) {
  5761. if (ctx->prealloc_x_need_sync) {
  5762. ggml_vk_sync_buffers(ctx, subctx);
  5763. }
  5764. GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
  5765. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
  5766. }
  5767. if (y_non_contig) {
  5768. GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
  5769. if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
  5770. ctx->prealloc_y_last_tensor_used != src1) {
  5771. if (ctx->prealloc_y_need_sync) {
  5772. ggml_vk_sync_buffers(ctx, subctx);
  5773. }
  5774. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
  5775. ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
  5776. ctx->prealloc_y_last_tensor_used = src1;
  5777. }
  5778. }
  5779. if (quantize_y) {
  5780. if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
  5781. ctx->prealloc_y_last_tensor_used != src1) {
  5782. if (ctx->prealloc_y_need_sync) {
  5783. ggml_vk_sync_buffers(ctx, subctx);
  5784. }
  5785. ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0), y_ne * ne12 * ne13, true);
  5786. ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
  5787. ctx->prealloc_y_last_tensor_used = src1;
  5788. }
  5789. }
  5790. // For batch_n, the A matrix is the same for each batch, and B/D use the row stride as the batch stride
  5791. uint32_t stride_batch_x = batch_n ? 0 : ne00*ne01;
  5792. uint32_t stride_batch_y = batch_n ? ne10 : (ne10*ne11);
  5793. uint32_t stride_batch_d = batch_n ? ne20 : (ne20*ne21);
  5794. if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
  5795. stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
  5796. }
  5797. if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
  5798. stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
  5799. }
  5800. const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];
  5801. uint32_t groups_x = ne01;
  5802. uint32_t groups_z = 1;
  5803. if (ne01 > max_groups_x) {
  5804. groups_z = 64;
  5805. groups_x = CEIL_DIV(groups_x, groups_z);
  5806. }
  5807. // TODO: Clean up this whole sz * ne_2 * ne_3 thing, it hasn't been necessary for a long time
  5808. uint32_t y_sz_total = y_sz * ne12 * ne13;
  5809. if (quantize_y) {
  5810. y_sz_total = CEIL_DIV(y_sz_total, 144) * 144;
  5811. }
  5812. uint32_t enable_bias = ctx->num_additional_fused_ops > 0;
  5813. vk_buffer d_B = d_D;
  5814. size_t b_buf_offset = 0;
  5815. uint64_t b_sz = 0;
  5816. if (enable_bias) {
  5817. const ggml_tensor * add = cgraph->nodes[node_idx + 1];
  5818. const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
  5819. bool b_uma = false;
  5820. if (ctx->device->uma) {
  5821. ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
  5822. b_uma = d_B != nullptr;
  5823. }
  5824. if(!b_uma) {
  5825. ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
  5826. d_B = bias_buf_ctx->dev_buffer;
  5827. b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
  5828. GGML_ASSERT(d_B != nullptr);
  5829. b_sz = ggml_nbytes(bias);
  5830. }
  5831. }
  5832. // compute
  5833. const vk_mat_vec_push_constants pc = {
  5834. (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
  5835. stride_batch_x, stride_batch_y, stride_batch_d, enable_bias,
  5836. (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3,
  5837. };
  5838. ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
  5839. {
  5840. vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 },
  5841. vk_subbuffer{ d_Y, y_buf_offset, y_sz_total },
  5842. vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23},
  5843. vk_subbuffer{ d_B, b_buf_offset, b_sz },
  5844. },
  5845. pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });
  5846. if (x_non_contig) {
  5847. ctx->prealloc_x_need_sync = true;
  5848. }
  5849. if (y_non_contig || quantize_y) {
  5850. ctx->prealloc_y_need_sync = true;
  5851. }
  5852. }
  5853. static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
  5854. ggml_tensor * dst = cgraph->nodes[node_idx];
  5855. const ggml_tensor * src0 = dst->src[0];
  5856. const ggml_tensor * src1 = dst->src[1];
VK_LOG_DEBUG("ggml_vk_mul_mat_vec_p021_f16_f32(" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  5858. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  5859. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
  5860. std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
  5861. GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
  5862. GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
  5863. GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
  5864. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  5865. GGML_ASSERT(src1->type == GGML_TYPE_F32);
    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;

    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t x_ne = ne00 * ne01 * ne02;
    const uint64_t y_ne = ne10 * ne11 * ne12;
    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t d_sz = sizeof(float) * d_ne;

    // With grouped query attention there are > 1 Q matrices per K, V matrix.
    uint32_t gqa_ratio = (uint32_t)ne12 / (uint32_t)ne02;
    if (gqa_ratio > 8 || gqa_ratio == 0 || ne12 != ne02 * gqa_ratio) {
        gqa_ratio = 1;
    }
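    // e.g. 32 query heads sharing 8 KV heads give gqa_ratio = 4; the clamp above falls
    // back to 1 for ratios over 8 or non-exact multiples, since the pipeline array below
    // only has specialized variants for ratios 1..8.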
    if (dryrun) {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1);
        return;
    }

    vk_buffer d_D;
    uint64_t d_buf_offset = 0;

    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
        d_D = dst_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(add) + add->view_offs;
    } else {
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
        d_D = dst_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    }

    GGML_ASSERT(d_D != nullptr);

    vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
    const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);

    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
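    // Descriptor offsets must honor minStorageBufferOffsetAlignment, so the splits above
    // turn each raw byte offset into an aligned base (bound to the descriptor) plus a
    // small remainder that is passed to the shader as an element offset.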
    uint32_t enable_bias = ctx->num_additional_fused_ops > 0;

    vk_buffer d_B = d_D;
    size_t b_buf_offset = 0;
    uint64_t b_sz = 0;

    if (enable_bias) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];

        bool b_uma = false;

        if (ctx->device->uma) {
            ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
            b_uma = d_B != nullptr;
        }
        if (!b_uma) {
            ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
            d_B = bias_buf_ctx->dev_buffer;
            b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
            GGML_ASSERT(d_B != nullptr);
            b_sz = ggml_nbytes(bias);
        }
    }
    // compute
    const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)), enable_bias };

    uint32_t workgroups_z = (uint32_t)ne12;
    // When gqa_ratio > 1, each invocation does multiple rows and we can launch fewer workgroups
    if (gqa_ratio > 1) {
        workgroups_z /= gqa_ratio;
    }

    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1],
        {
            vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz },
            vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset },
            vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset },
            vk_subbuffer{ d_B, b_buf_offset, b_sz },
        }, pc, { 1, (uint32_t)ne01, workgroups_z });
}
static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");

    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];

    const uint64_t nb12 = src1->nb[2];

    // const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    const uint32_t nb03 = (uint32_t)(src0->nb[3] / sizeof(ggml_fp16_t));
    const uint32_t nb13 = (uint32_t)(src1->nb[3] / sizeof(float));
    const uint32_t nb23 = (uint32_t)(dst->nb[3] / sizeof(float));

    GGML_ASSERT(ne11 == 1);
    GGML_ASSERT(src0->ne[3] == src1->ne[3]); // checked in supports_op

    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;

    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t d_ne = ne01 * ne11 * ne12 * ne03;

    const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_y = nb12 / sizeof(float);

    const uint64_t qx_sz = ggml_nbytes(src0);
    const uint64_t qy_sz = ggml_nbytes(src1);
    const uint64_t d_sz = sizeof(float) * d_ne;
    if (dryrun) {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);
        return;
    }

    vk_buffer d_D;
    uint64_t d_buf_offset = 0;

    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
        d_D = dst_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(add) + add->view_offs;
    } else {
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
        d_D = dst_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    }

    GGML_ASSERT(d_D != nullptr);

    vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
    const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);

    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
    uint32_t enable_bias = ctx->num_additional_fused_ops > 0;

    vk_buffer d_B = d_D;
    size_t b_buf_offset = 0;
    uint64_t b_sz = 0;

    if (enable_bias) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];

        bool b_uma = false;

        if (ctx->device->uma) {
            ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
            b_uma = d_B != nullptr;
        }
        if (!b_uma) {
            ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
            d_B = bias_buf_ctx->dev_buffer;
            b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
            GGML_ASSERT(d_B != nullptr);
            b_sz = ggml_nbytes(bias);
        }
    }

    // compute
    const std::array<uint32_t, 13> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, channel_stride_y, (uint32_t)(ne12 / ne02), (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)), nb03, nb13, nb23, enable_bias };
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
        {
            vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz },
            vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset },
            vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset },
            vk_subbuffer{ d_B, b_buf_offset, b_sz },
        }, pc, { (uint32_t)ne03, (uint32_t)ne01, (uint32_t)ne12 });
}
static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    VK_LOG_DEBUG("ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")");

    // Handle huge A matrix by splitting the M dimensions. This works well for convolution use cases
    // where the M dimension is very large.
    // Split_k doesn't work with M splitting.
    const size_t nbytes = ggml_nbytes(src0);
    const bool needs_split = nbytes > ctx->device->properties.limits.maxStorageBufferRange;
    if (needs_split) {
        // Choose the number of rows that can fit (and divide by two, to allow for any additional offsets)
        const uint32_t M_split = ctx->device->properties.limits.maxStorageBufferRange / (2 * src0->nb[1]);
        uint32_t m_offset = 0;
        while (m_offset < dst->ne[0]) {
            const uint32_t cur_M_size = std::min(M_split, (uint32_t)(dst->ne[0] - m_offset));
            ggml_tensor dst2 = *dst;
            ggml_tensor src02 = *src0;

            dst2.view_src = dst->view_src ? dst->view_src : dst;
            src02.view_src = src0->view_src ? src0->view_src : src0;

            dst2.view_offs += m_offset * dst->nb[0];
            src02.view_offs += m_offset * src0->nb[1];

            dst2.ne[0] = cur_M_size;
            src02.ne[1] = cur_M_size;

            ggml_vk_mul_mat_q_f16(ctx, subctx, &src02, src1, &dst2, true, dryrun);

            m_offset += cur_M_size;
        }
    } else if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && dst->ne[1] == 1 &&
               // detect 0213 permutation, and batch size of 1
               src0->nb[0] <= src0->nb[2] &&
               src0->nb[2] <= src0->nb[1] &&
               src0->nb[1] <= src0->nb[3] &&
               src1->nb[0] <= src1->nb[2] &&
               src1->nb[2] <= src1->nb[1] &&
               src1->nb[1] <= src1->nb[3] &&
               src0->ne[3] == 1 &&
               src1->ne[3] == 1) {
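        // 0213 layout: ggml_permute(..., 0, 2, 1, 3) orders the byte strides as
        // nb[0] <= nb[2] <= nb[1] <= nb[3] (attention produces this when it moves heads
        // into dimension 1); the p021 shader reads this layout without a contiguous copy.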
        ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, cgraph, node_idx, dryrun);
    } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1 &&
               !ggml_is_permuted(src0) && !ggml_is_permuted(src1)) {
        ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, cgraph, node_idx, dryrun);
    // mul_mat_vec supports batching ne12*ne13 when ne11==1, or treating ne11 as the batch size (up to four)
    // when ne12 and ne13 are one.
    } else if ((dst->ne[1] == 1 || (dst->ne[1] <= mul_mat_vec_max_cols && src1->ne[2] * src1->ne[3] == 1)) &&
               (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_q_f16(ctx, subctx, cgraph, node_idx, dryrun);
    } else {
        ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, false, dryrun);
    }
}
static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];

    const uint32_t nbi1 = ids->nb[1];
    const uint32_t nbi2 = ids->nb[2];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t n_as = ne02;
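    // MUL_MAT_ID: src0 stacks n_as expert matrices along dimension 2, and the i32 ids
    // tensor selects which expert multiplies each row of src1 (the mixture-of-experts case).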
    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
    ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;

    vk_buffer d_Qx = nullptr;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids = nullptr;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    // Reformat and convert to fp16 if non-contiguous, or for coopmat2 for better perf
    const bool x_non_contig = (ctx->device->coopmat2 && src0->type == GGML_TYPE_F32) ||
                              !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = (ctx->device->coopmat2 && src1->type == GGML_TYPE_F32) ||
                              (src0->type == GGML_TYPE_BF16 && src1->type != GGML_TYPE_BF16) ||
                              !ggml_vk_dim01_contiguous(src1);

    // If src0 is BF16, try to use a BF16 x BF16 multiply
    ggml_type f16_type = src0->type == GGML_TYPE_BF16 ? GGML_TYPE_BF16 : GGML_TYPE_F16;

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && (ne11 * ne10) % 4 == 0;
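    // The (ne11 * ne10) % 4 requirement above: the Q8_1 quantize shader presumably
    // consumes src1 in vec4 loads, so the element count must be a multiple of 4.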
    // Check for mmq first
    vk_matmul_pipeline mmp = quantize_y ? ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, GGML_TYPE_Q8_1, (ggml_prec)dst->op_params[0]) : nullptr;

    if (mmp == nullptr) {
        // Fall back to f16 dequant mul mat
        mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? f16_type : src1->type, (ggml_prec)dst->op_params[0]);
        quantize_y = false;
    }

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = !quantize_y && ((src1->type != f16_type && !y_f32_kernel) || y_non_contig);

    if (qx_needs_dequant) {
        // Fall back to dequant + f16 mulmat
        mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, f16_type, y_f32_kernel ? GGML_TYPE_F32 : f16_type, (ggml_prec)dst->op_params[0]);
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint32_t kpad = quantize_y ? 0 : ggml_vk_align_size(ne10, ggml_vk_guess_matmul_id_pipeline_align(ctx, mmp, ne01, nei1, qx_needs_dequant ? f16_type : src0->type));
    const bool aligned = !quantize_y && ne10 == kpad && ne01 > 8 && nei1 > 8;

    vk_pipeline pipeline = ggml_vk_guess_matmul_id_pipeline(ctx, mmp, ne01, nei1, aligned, qx_needs_dequant ? f16_type : src0->type);

    // Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking
    uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) : ne11;
    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = padded_n * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = quantize_y ? (y_ne * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) : (y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    vk_pipeline to_q8_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, f16_type);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, f16_type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    if (quantize_y) {
        to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1, true);
    }

    if (dryrun) {
        const uint64_t x_sz_upd = x_sz * ne02 * ne03;
        uint64_t y_sz_upd = y_sz * ne12 * ne13;
        if (quantize_y) {
            y_sz_upd = CEIL_DIV(y_sz_upd, 144) * 144;
        }
        if (
                (qx_needs_dequant && x_sz_upd > ctx->device->properties.limits.maxStorageBufferRange) ||
                (qy_needs_dequant && y_sz_upd > ctx->device->properties.limits.maxStorageBufferRange)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
            ctx->prealloc_size_x = x_sz_upd;
        }
        if ((qy_needs_dequant || quantize_y) && ctx->prealloc_size_y < y_sz_upd) {
            ctx->prealloc_size_y = y_sz_upd;
        }

        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
        }
        if (quantize_y) {
            ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1);
        }
        return;
    }
    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = ids_buf_ctx->dev_buffer;
        ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne12 * ne13);
    } else if (quantize_y) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= CEIL_DIV(y_sz * ne12 * ne13, 144) * 144);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig || qx_needs_dequant) {
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0,
            { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc, { (uint32_t)(x_ne * ne02 * ne03), 1, 1 });
        ggml_vk_sync_buffers(ctx, subctx);
    }
    if (y_non_contig) {
        if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
            ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }
    if (quantize_y) {
        if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0), y_ne * ne12 * ne13, true);
            ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant && !quantize_y) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    uint32_t y_sz_total = y_sz * ne12 * ne13;
    if (quantize_y) {
        y_sz_total = CEIL_DIV(y_sz_total, 144) * 144;
    }

    // compute
    ggml_vk_matmul_id(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz_total },
        { d_D, d_buf_offset, d_sz * ne22 * ne23 }, { d_ids, ids_buf_offset, ids_sz },
        ne01, ne21, ne10, ne10, ne10, ne01,
        stride_batch_x, stride_batch_y, ne20*ne21,
        n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11, padded_n
    ); // NOLINT

    if (x_non_contig || qx_needs_dequant) {
        ctx->prealloc_x_need_sync = true;
    }
    if (y_non_contig) {
        ctx->prealloc_y_need_sync = true;
    }
}
static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    ggml_tensor * ids = dst->src[2];
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");

    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];

    const uint64_t nbi2 = ids->nb[2];

    GGML_ASSERT(nei1 == 1);
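    // This vector path handles exactly one row of expert ids (a single token) per dispatch;
    // ggml_vk_use_mul_mat_vec_id routes wider id matrices to the full matmul-id path.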
    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
    ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;

    vk_buffer d_Qx = nullptr;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids = nullptr;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec_id(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    if (dryrun) {
        const uint64_t x_sz_upd = x_sz * ne02 * ne03;
        const uint64_t y_sz_upd = y_sz * ne12 * ne13;
        if (
                (qx_needs_dequant && x_sz_upd > ctx->device->properties.limits.maxStorageBufferRange) ||
                (qy_needs_dequant && y_sz_upd > ctx->device->properties.limits.maxStorageBufferRange)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
            ctx->prealloc_size_x = x_sz_upd;
        }
        if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
            ctx->prealloc_size_y = y_sz_upd;
        }

        // Request descriptor sets
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
        }
        ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1);
        return;
    }
    vk_buffer d_D;
    uint64_t d_buf_offset = 0;

    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
        d_D = dst_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(add) + add->view_offs;
    } else {
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
        d_D = dst_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    }

    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = ids_buf_ctx->dev_buffer;
        ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig) {
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
    }

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
            ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }

    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x = CEIL_DIV(groups_x, groups_z);
    }

    uint32_t enable_bias = ctx->num_additional_fused_ops > 0;

    vk_buffer d_B = d_D;
    size_t b_buf_offset = 0;
    uint64_t b_sz = 0;

    if (enable_bias) {
        const ggml_tensor * bias = cgraph->nodes[node_idx + 1]->src[1];

        bool b_uma = false;

        if (ctx->device->uma) {
            ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
            b_uma = d_B != nullptr;
        }
        if (!b_uma) {
            ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
            d_B = bias_buf_ctx->dev_buffer;
            b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
            GGML_ASSERT(d_B != nullptr);
            b_sz = ggml_nbytes(bias);
        }
    }

    // compute
    const vk_mat_vec_id_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        (uint32_t)x_ne, stride_batch_y, (uint32_t)(ne20*ne21),
        enable_bias,
        (uint32_t)nei0, (uint32_t)ne11,
    };
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        {
            vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 },
            vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 },
            vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23 },
            vk_subbuffer{ d_B, b_buf_offset, b_sz },
            vk_subbuffer{ d_ids, ids_buf_offset, ids_sz },
        },
        pc, { groups_x, (uint32_t)nei0, groups_z });

    if (x_non_contig) {
        ctx->prealloc_x_need_sync = true;
    }
    if (y_non_contig) {
        ctx->prealloc_y_need_sync = true;
    }
}
static bool ggml_vk_use_mul_mat_vec_id(const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src2 = dst->src[2];
    return src2->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type));
}

static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    ggml_tensor * src2 = dst->src[2];
    VK_LOG_DEBUG("ggml_vk_mul_mat_id(" << src0 << ", " << src1 << ", " << src2 << ", " << dst << ")");
    if (ggml_vk_use_mul_mat_vec_id(cgraph, node_idx)) {
        ggml_vk_mul_mat_vec_id_q_f16(ctx, subctx, cgraph, node_idx, dryrun);
    } else {
        ggml_vk_mul_mat_id_q_f16(ctx, subctx, src0, src1, src2, dst, dryrun);
    }
}
static bool ggml_vk_flash_attn_scalar_shmem_support(const vk_device& device, const uint32_t hsk, uint32_t hsv) {
    // Needs to be kept up to date on shader changes
    const uint32_t wg_size = scalar_flash_attention_workgroup_size;
    const uint32_t Br = get_fa_scalar_num_large_rows(hsv);
    const uint32_t Bc = scalar_flash_attention_Bc;

    const uint32_t tmpsh = wg_size * sizeof(float);
    const uint32_t tmpshv4 = wg_size * 4 * sizeof(float);

    const uint32_t masksh = Bc * Br * sizeof(float);

    const uint32_t Qf = Br * (hsk / 4 + 2) * 4 * sizeof(float);

    const uint32_t total_size = tmpsh + tmpshv4 + masksh + Qf;
    const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;

    VK_LOG_DEBUG("ggml_vk_flash_attn_scalar_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", total_size=" << total_size << ", supported=" << supported);

    return supported;
}
static bool ggml_vk_flash_attn_coopmat_shmem_support(const vk_device& device, const uint32_t hsk, uint32_t hsv, bool f32acc) {
    // Needs to be kept up to date on shader changes
    GGML_UNUSED(hsv);
    const uint32_t wg_size = scalar_flash_attention_workgroup_size;
    const uint32_t Br = coopmat1_flash_attention_num_large_rows;
    const uint32_t Bc = scalar_flash_attention_Bc;

    const uint32_t hsk_pad = ROUNDUP_POW2(hsk, 16);

    const uint32_t acctype = f32acc ? 4 : 2;
    const uint32_t f16vec4 = 8;

    const uint32_t tmpsh = wg_size * sizeof(float);
    const uint32_t tmpshv4 = wg_size * 4 * acctype;

    const uint32_t qstride = hsk_pad / 4 + 2;
    const uint32_t Qf = Br * qstride * f16vec4;

    const uint32_t sfshstride = (hsk <= 128) ? (Br + 8) : Br;
    const uint32_t sfsh = Bc * sfshstride * acctype;

    const uint32_t kshstride = hsk_pad / 4 + 2;
    const uint32_t ksh = Bc * kshstride * f16vec4;

    const uint32_t slope = Br * sizeof(float);

    const uint32_t total_size = tmpsh + tmpshv4 + Qf + sfsh + ksh + slope;
    const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;

    VK_LOG_DEBUG("ggml_vk_flash_attn_coopmat_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", f32acc=" << f32acc << ", total_size=" << total_size << ", supported=" << supported);

    return supported;
}
static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * q, const ggml_tensor * k, const ggml_tensor * v, const ggml_tensor * mask, const ggml_tensor * sinks, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_flash_attn((" << q << ", name=" << q->name << ", type=" << q->type << ", ne0=" << q->ne[0] << ", ne1=" << q->ne[1] << ", ne2=" << q->ne[2] << ", ne3=" << q->ne[3] << ", nb0=" << q->nb[0] << ", nb1=" << q->nb[1] << ", nb2=" << q->nb[2] << ", nb3=" << q->nb[3];
    std::cerr << "), (" << k << ", name=" << k->name << ", type=" << k->type << ", ne0=" << k->ne[0] << ", ne1=" << k->ne[1] << ", ne2=" << k->ne[2] << ", ne3=" << k->ne[3] << ", nb0=" << k->nb[0] << ", nb1=" << k->nb[1] << ", nb2=" << k->nb[2] << ", nb3=" << k->nb[3];
    std::cerr << "), (" << v << ", name=" << v->name << ", type=" << v->type << ", ne0=" << v->ne[0] << ", ne1=" << v->ne[1] << ", ne2=" << v->ne[2] << ", ne3=" << v->ne[3] << ", nb0=" << v->nb[0] << ", nb1=" << v->nb[1] << ", nb2=" << v->nb[2] << ", nb3=" << v->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    if (sinks) {
        std::cerr << "), (" << sinks << ", name=" << sinks->name << ", type=" << sinks->type << ", ne0=" << sinks->ne[0] << ", ne1=" << sinks->ne[1] << ", ne2=" << sinks->ne[2] << ", ne3=" << sinks->ne[3] << ", nb0=" << sinks->nb[0] << ", nb1=" << sinks->nb[1] << ", nb2=" << sinks->nb[2] << ", nb3=" << sinks->nb[3];
    }
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");

    GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
    GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
    GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
    GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
    GGML_TENSOR_LOCALS(size_t, nb, dst, nb)

    const uint32_t nem1 = mask ? mask->ne[1] : 0;
    const uint32_t nem2 = mask ? mask->ne[2] : 0;
    const uint32_t nem3 = mask ? mask->ne[3] : 0;

    const uint32_t HSK = nek0;
    const uint32_t HSV = nev0;
    uint32_t N = neq1;
    const uint32_t KV = nek1;

    GGML_ASSERT(ne0 == HSV);
    GGML_ASSERT(ne2 == N);

    // input tensor rows must be contiguous
    GGML_ASSERT(nbq0 == ggml_type_size(q->type));
    GGML_ASSERT(nbk0 == ggml_type_size(k->type));
    GGML_ASSERT(nbv0 == ggml_type_size(v->type));

    GGML_ASSERT(neq0 == HSK);

    GGML_ASSERT(neq1 == N);

    GGML_ASSERT(nev1 == nek1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    assert(dst->type == GGML_TYPE_F32);
    assert(q->type == GGML_TYPE_F32);
    assert(k->type == v->type);

    FaCodePath path = ctx->device->coopmat2 ? FA_COOPMAT2 :
                      ctx->device->coopmat1_fa_support ? FA_COOPMAT1 : FA_SCALAR;

    if (path == FA_COOPMAT1) {
        const bool coopmat_shape_supported = (dst->op_params[3] == GGML_PREC_F32 && ctx->device->coopmat_support_16x16x16_f32acc) ||
                                             (dst->op_params[3] != GGML_PREC_F32 && ctx->device->coopmat_support_16x16x16_f16acc);

        const bool coopmat_shmem_supported = ggml_vk_flash_attn_coopmat_shmem_support(ctx->device, HSK, HSV, dst->op_params[3] == GGML_PREC_F32);

        if (!coopmat_shape_supported || !coopmat_shmem_supported) {
            path = FA_SCALAR;
        }
    }

    uint32_t gqa_ratio = 1;
    uint32_t qk_ratio = neq2 / nek2;
    uint32_t workgroups_x = (uint32_t)neq1;
    uint32_t workgroups_y = (uint32_t)neq2;
    uint32_t workgroups_z = (uint32_t)neq3;

    // For scalar/coopmat1 FA, we can use the "large" size to accommodate gqa.
    // For coopmat2 FA, we always use the small size (which is still pretty large for gqa).
    uint32_t max_gqa;
    switch (path) {
    case FA_SCALAR:
    case FA_COOPMAT1:
        // We may switch from coopmat1 to scalar, so use the scalar limit for both
        max_gqa = get_fa_scalar_num_large_rows(HSV);
        break;
    case FA_COOPMAT2:
        max_gqa = get_fa_num_small_rows(FA_COOPMAT2);
        break;
    default:
        GGML_ASSERT(0);
    }

    if (N == 1 && qk_ratio > 1 && qk_ratio <= max_gqa &&
        qk_ratio * nek2 == neq2 && nek2 == nev2 && nem2 <= 1) {
        // grouped query attention - make the N dimension equal to gqa_ratio, reduce
        // workgroups proportionally in y dimension. The shader will detect gqa_ratio > 1
        // and change addressing calculations to index Q's dimension 2.
        gqa_ratio = qk_ratio;
        N = gqa_ratio;
        workgroups_y /= N;
    }
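    // e.g. a single-token decode (N == 1) with 32 Q heads over 8 KV heads: qk_ratio = 4,
    // so one workgroup handles all 4 queries that share a KV head and workgroups_y drops
    // from 32 to 8.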
    bool small_rows = N <= get_fa_num_small_rows(path);

    // coopmat1 does not actually support "small rows" (it needs 16 rows).
    // So use scalar instead.
    if (small_rows && path == FA_COOPMAT1) {
        path = FA_SCALAR;
    }

    // scalar is faster than coopmat2 when N==1
    if (N == 1 && path == FA_COOPMAT2) {
        path = FA_SCALAR;
    }

    // with large hsk/hsv, scalar path may need to use small_rows to fit in shared memory
    if (path == FA_SCALAR &&
        !ggml_vk_flash_attn_scalar_shmem_support(ctx->device, HSK, HSV)) {
        small_rows = true;
    }

    const uint32_t q_stride = (uint32_t)(nbq1 / ggml_type_size(q->type));
    uint32_t k_stride = (uint32_t)(nbk1 / ggml_type_size(k->type));
    uint32_t v_stride = (uint32_t)(nbv1 / ggml_type_size(v->type));

    // For F32, the shader treats it as a block of size 4 (for vec4 loads)
    if (k->type == GGML_TYPE_F32) {
        k_stride /= 4;
    }
    if (v->type == GGML_TYPE_F32) {
        v_stride /= 4;
    }

    uint32_t alignment = fa_align(path, HSK, HSV, k->type, small_rows);
    bool aligned = (KV % alignment) == 0 &&
                   // the "aligned" shader variant will forcibly align strides, for performance
                   (q_stride & 7) == 0 && (k_stride & 7) == 0 && (v_stride & 7) == 0;

    // Need to use the coopmat2 variant that clamps loads when HSK/HSV aren't sufficiently aligned.
    if (((HSK | HSV) % 16) != 0 && path == FA_COOPMAT2) {
        aligned = false;
    }

    bool f32acc = path == FA_SCALAR || dst->op_params[3] == GGML_PREC_F32;

    vk_fa_pipeline_state fa_pipeline_state(HSK, HSV, small_rows, path, aligned, f32acc);

    vk_pipeline pipeline = nullptr;

    auto &pipelines = ctx->device->pipeline_flash_attn_f32_f16[k->type];

    auto it = pipelines.find(fa_pipeline_state);
    if (it != pipelines.end()) {
        pipeline = it->second;
    } else {
        pipelines[fa_pipeline_state] = pipeline = std::make_shared<vk_pipeline_struct>();
    }

    assert(pipeline);

    uint32_t split_kv = KV;
    uint32_t split_k = 1;

    // Use a placeholder core count if one isn't available. split_k is a big help for perf.
    const uint32_t shader_core_count = ctx->device->shader_core_count ? ctx->device->shader_core_count : 16;

    // Try to use split_k when KV is large enough to be worth the overhead
    if (workgroups_x == 1 && shader_core_count > 0) {
        // Try to run two workgroups per SM.
        split_k = shader_core_count * 2 / (workgroups_y * workgroups_z);
        if (split_k > 1) {
            // Try to evenly split KV into split_k chunks, but it needs to be a multiple
            // of "align", so recompute split_k based on that.
            split_kv = ROUNDUP_POW2(std::max(1u, KV / split_k), alignment);
            split_k = CEIL_DIV(KV, split_kv);
            workgroups_x = split_k;
        }
    }
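    // e.g. 16 shader cores with workgroups_y * workgroups_z == 4 gives split_k = 8;
    // KV is then carved into 8 alignment-rounded chunks and workgroups_x carries the
    // split count instead of the row-tile count.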
    // Reserve space for split_k temporaries. For each split x batch, we need to store the O matrix (D x ne1)
    // and the per-row m and L values (ne1 rows). We store all the matrices first, followed by the rows.
    const uint64_t split_k_size = split_k > 1 ? (HSV * ne1 * sizeof(float) + ne1 * sizeof(float) * 2) * split_k * ne3 : 0;
    if (split_k_size > ctx->device->properties.limits.maxStorageBufferRange) {
        GGML_ABORT("Requested preallocation size is too large");
    }
    if (ctx->prealloc_size_split_k < split_k_size) {
        ctx->prealloc_size_split_k = split_k_size;
    }

    if (dryrun) {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        if (split_k > 1) {
            ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_flash_attn_split_k_reduce, 1);
        }
        return;
    }

    float scale         = 1.0f;
    float max_bias      = 0.0f;
    float logit_softcap = 0.0f;

    memcpy(&scale,         (const float *) dst->op_params + 0, sizeof(float));
    memcpy(&max_bias,      (const float *) dst->op_params + 1, sizeof(float));
    memcpy(&logit_softcap, (const float *) dst->op_params + 2, sizeof(float));

    if (logit_softcap != 0) {
        scale /= logit_softcap;
    }
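    // With softcapping the shader presumably evaluates logit_softcap * tanh(scale' * QK^T);
    // pre-dividing scale here makes that equal the reference softcap * tanh(scale * QK^T / softcap).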
    const uint32_t n_head_kv   = neq2;
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
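    // ALiBi slope bases: heads below n_head_log2 presumably get slopes m0^(h+1), the
    // remainder m1^(2*(h - n_head_log2) + 1), matching ggml's reference slope schedule.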
    vk_buffer d_Q = nullptr, d_K = nullptr, d_V = nullptr, d_D = nullptr, d_M = nullptr, d_S = nullptr;
    size_t q_buf_offset = 0, k_buf_offset = 0, v_buf_offset = 0, d_buf_offset = 0, m_buf_offset = 0, s_buf_offset = 0;

    bool Q_uma = false, K_uma = false, V_uma = false, D_uma = false, M_uma = false, S_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, q->data, d_Q, q_buf_offset);
        ggml_vk_host_get(ctx->device, k->data, d_K, k_buf_offset);
        ggml_vk_host_get(ctx->device, v->data, d_V, v_buf_offset);
        ggml_vk_host_get(ctx->device, dst->data, d_D, d_buf_offset);
        Q_uma = d_Q != nullptr;
        K_uma = d_K != nullptr;
        V_uma = d_V != nullptr;
        D_uma = d_D != nullptr;
        if (mask) {
            ggml_vk_host_get(ctx->device, mask->data, d_M, m_buf_offset);
            M_uma = d_M != nullptr;
        }
        if (sinks) {
            ggml_vk_host_get(ctx->device, sinks->data, d_S, s_buf_offset);
            S_uma = d_S != nullptr;
        }
    }

    ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * q_buf_ctx = (ggml_backend_vk_buffer_context *)q->buffer->context;
    ggml_backend_vk_buffer_context * k_buf_ctx = (ggml_backend_vk_buffer_context *)k->buffer->context;
    ggml_backend_vk_buffer_context * v_buf_ctx = (ggml_backend_vk_buffer_context *)v->buffer->context;

    if (!Q_uma) {
        d_Q = q_buf_ctx->dev_buffer;
        q_buf_offset = vk_tensor_offset(q) + q->view_offs;
    }
    if (!K_uma) {
        d_K = k_buf_ctx->dev_buffer;
        k_buf_offset = vk_tensor_offset(k) + k->view_offs;
    }
    if (!V_uma) {
        d_V = v_buf_ctx->dev_buffer;
        v_buf_offset = vk_tensor_offset(v) + v->view_offs;
    }
    if (!D_uma) {
        d_D = d_buf_ctx->dev_buffer;
        d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    }

    if (!M_uma) {
        d_M = d_Q;
        m_buf_offset = q_buf_offset;
        if (mask) {
            ggml_backend_vk_buffer_context * m_buf_ctx = (ggml_backend_vk_buffer_context*)mask->buffer->context;
            d_M = m_buf_ctx->dev_buffer;
            m_buf_offset = vk_tensor_offset(mask) + mask->view_offs;
        }
    }
    if (!S_uma) {
        d_S = d_Q;
        s_buf_offset = q_buf_offset;
        if (sinks) {
            ggml_backend_vk_buffer_context * s_buf_ctx = (ggml_backend_vk_buffer_context*)sinks->buffer->context;
            d_S = s_buf_ctx->dev_buffer;
            s_buf_offset = vk_tensor_offset(sinks) + sinks->view_offs;
        }
    }

    uint32_t mask_n_head_log2 = ((sinks != nullptr) << 24) | ((mask != nullptr) << 16) | n_head_log2;
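    // The sinks/mask presence flags are packed above n_head_log2 (which fits in the low
    // 16 bits) so a single push constant carries all three values to the shader.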
  6860. const vk_flash_attn_push_constants pc = { N, KV,
  6861. (uint32_t)ne1, (uint32_t)ne2, (uint32_t)ne3,
  6862. (uint32_t)neq2, (uint32_t)neq3,
  6863. (uint32_t)nek2, (uint32_t)nek3,
  6864. (uint32_t)nev2, (uint32_t)nev3,
  6865. nem1, nem2, nem3,
  6866. q_stride, (uint32_t)nbq2, (uint32_t)nbq3,
  6867. k_stride, (uint32_t)nbk2, (uint32_t)nbk3,
  6868. v_stride, (uint32_t)nbv2, (uint32_t)nbv3,
  6869. scale, max_bias, logit_softcap,
  6870. mask_n_head_log2, m0, m1,
  6871. gqa_ratio, split_kv, split_k };
    if (split_k > 1) {
        if (ctx->prealloc_split_k_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }

        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
            {
                ggml_vk_subbuffer(ctx, d_Q, q_buf_offset),
                ggml_vk_subbuffer(ctx, d_K, k_buf_offset),
                ggml_vk_subbuffer(ctx, d_V, v_buf_offset),
                ggml_vk_subbuffer(ctx, d_M, m_buf_offset),
                ggml_vk_subbuffer(ctx, d_S, s_buf_offset),
                ggml_vk_subbuffer(ctx, ctx->prealloc_split_k, 0),
            },
            // We only use split_k when group query attention is enabled, which means
            // there's no more than one tile of rows (i.e. workgroups_x would have been
            // one). We reuse workgroups_x to mean the number of splits, so we need to
            // cancel out the divide by wg_denoms[0].
            pc, { workgroups_x * pipeline->wg_denoms[0], workgroups_y, workgroups_z });

        ggml_vk_sync_buffers(ctx, subctx);
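
        // Second dispatch: combine the per-split partial results written to
        // prealloc_split_k into the final output tensor.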
        const std::array<uint32_t, 5> pc2 = { HSV, (uint32_t)ne1, (uint32_t)ne3, split_k, (sinks != nullptr) };
        ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_flash_attn_split_k_reduce,
            {
                ggml_vk_subbuffer(ctx, ctx->prealloc_split_k, 0),
                ggml_vk_subbuffer(ctx, d_S, s_buf_offset),
                ggml_vk_subbuffer(ctx, d_D, d_buf_offset),
            },
            pc2, { (uint32_t)ne1, HSV, (uint32_t)ne3 });
        ctx->prealloc_split_k_need_sync = true;
    } else {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
            {
                ggml_vk_subbuffer(ctx, d_Q, q_buf_offset),
                ggml_vk_subbuffer(ctx, d_K, k_buf_offset),
                ggml_vk_subbuffer(ctx, d_V, v_buf_offset),
                ggml_vk_subbuffer(ctx, d_M, m_buf_offset),
                ggml_vk_subbuffer(ctx, d_S, s_buf_offset),
                ggml_vk_subbuffer(ctx, d_D, d_buf_offset),
            },
            pc, { workgroups_x, workgroups_y, workgroups_z });
    }
}
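
// Compute the dispatch grid for the direct conv2d shaders. The output is viewed
// as a Cout x NPQ matrix (NPQ = N*OH*OW) that gets tiled across workgroups.
// Illustrative example (values chosen here, not taken from any model): a 3x3
// kernel with stride 1, padding 1, dilation 1 over a 224-wide input gives
// OW = (224 + 2*1 - 1*(3-1) - 1)/1 + 1 = 224, i.e. "same" padding.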
static std::array<uint32_t, 3> ggml_vk_get_conv_elements(const ggml_tensor *dst) {
    const ggml_tensor *src0 = dst->src[0];
    const ggml_tensor *src1 = dst->src[1];

    // src0 - kernel:  [KW, KH, Cin, Cout]
    // src1 - input:   [W, H, Cin, N]
    // dst  - result:  [OW, OH, Cout, N]

    // Copied from ggml.c: int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d)
    auto calc_conv_output_size = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t {
        return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
    };
    // parallelize in {OW/BS_K, OH/BS_NPQ, 1}
    int64_t W    = src1->ne[0];
    int64_t H    = src1->ne[1];
    int64_t KW   = src0->ne[0];
    int64_t KH   = src0->ne[1];
    int64_t Cout = src0->ne[3];
    int64_t N    = src1->ne[3];
    int64_t OH   = calc_conv_output_size(H, KH, dst->op_params[1], dst->op_params[3], dst->op_params[5]);
    int64_t OW   = calc_conv_output_size(W, KW, dst->op_params[0], dst->op_params[2], dst->op_params[4]);
    int64_t NPQ  = N * OW * OH;

    // Tile output matrix to (K/NB_K, NPQ/NB_NPQ, 1) workgroups
    std::array<uint32_t, 3> elements = { static_cast<uint32_t>(Cout), static_cast<uint32_t>(NPQ), 1 };
    return elements;
}
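
// Transposed-convolution variant of the above. Only the stride comes from
// op_params[0]; padding is fixed to 0 and dilation to 1, so the formula
// reduces to OW = (W - 1)*s + KW. For example, a 14-wide input with stride 2
// and a 4-wide kernel yields (14 - 1)*2 - 0 + (4 - 1)*1 + 1 = 30.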
static std::array<uint32_t, 3> ggml_vk_get_conv_transpose_2d_elements(const ggml_tensor *dst) {
    const ggml_tensor *src0 = dst->src[0];
    const ggml_tensor *src1 = dst->src[1];

    // src0 - kernel:  [KW, KH, Cout, Cin]
    // src1 - input:   [W, H, Cin, N]
    // dst  - result:  [OW, OH, Cout, N]

    auto calc_conv_output_size = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t {
        return (ins - 1) * s - 2 * p + (ks - 1) * d + 1;
    };
    // parallelize in {OW/BS_K, OH/BS_NPQ, 1}
    int64_t W    = src1->ne[0];
    int64_t H    = src1->ne[1];
    int64_t KW   = src0->ne[0];
    int64_t KH   = src0->ne[1];
    int64_t Cout = src0->ne[2];
    int64_t N    = src1->ne[3];
    int64_t OH   = calc_conv_output_size(H, KH, dst->op_params[0], 0, 1);
    int64_t OW   = calc_conv_output_size(W, KW, dst->op_params[0], 0, 1);
    int64_t NPQ  = N * OW * OH;

    // Tile output matrix to (K/NB_K, NPQ/NB_NPQ, 1) workgroups
    std::array<uint32_t, 3> elements = { static_cast<uint32_t>(Cout), static_cast<uint32_t>(NPQ), 1 };
    return elements;
}
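
// Map a ggml op (and the types/shapes of its operands) to the compute pipeline
// that implements it. Returning nullptr means the combination is unsupported.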
static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * dst, ggml_op op) {
    switch (op) {
    case GGML_OP_GET_ROWS:
        GGML_ASSERT(src1->type == GGML_TYPE_I32);
        if (dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_get_rows[src0->type];
        }
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_get_rows_f32[src0->type];
        }
        return nullptr;
    case GGML_OP_ACC:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_acc_f32;
        }
        return nullptr;
    case GGML_OP_ADD:
    case GGML_OP_SUB:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
        if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) ||
            (src1->type != GGML_TYPE_F32 && src1->type != GGML_TYPE_F16) ||
            (dst->type  != GGML_TYPE_F32 && dst->type  != GGML_TYPE_F16)) {
            return nullptr;
        }
        switch (op) {
        case GGML_OP_ADD:
        {
            if (ctx->num_additional_fused_ops > 0) {
                if (ctx->do_add_rms_partials) {
                    return ctx->device->pipeline_multi_add_rms[ctx->num_additional_fused_ops];
                } else {
                    return ctx->device->pipeline_multi_add[ctx->num_additional_fused_ops];
                }
            }
            if (ctx->do_add_rms_partials) {
                auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_add_rms_norepeat : ctx->device->pipeline_add_rms;
                return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
            } else {
                auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_add_norepeat : ctx->device->pipeline_add;
                return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
            }
        }
        case GGML_OP_SUB:
        {
            auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_sub_norepeat : ctx->device->pipeline_sub;
            return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
        }
        case GGML_OP_MUL:
        {
            auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_mul_norepeat : ctx->device->pipeline_mul;
            return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
        }
        case GGML_OP_DIV:
        {
            auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_div_norepeat : ctx->device->pipeline_div;
            return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
        }
        default:
            break;
        }
        return nullptr;
    case GGML_OP_ADD_ID:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && src2->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_add_id_f32;
        }
        return nullptr;
    case GGML_OP_CONCAT:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_concat_f32;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_concat_f16;
        }
        if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_concat_i32;
        }
        return nullptr;
    case GGML_OP_UPSCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            ggml_scale_mode mode = (ggml_scale_mode)(ggml_get_op_params_i32(dst, 0) & 0xFF);
            switch (mode) {
            case GGML_SCALE_MODE_NEAREST:
                return ctx->device->pipeline_upscale_nearest_f32;
            case GGML_SCALE_MODE_BILINEAR:
                return ctx->device->pipeline_upscale_bilinear_f32;
            default:
                return nullptr;
            }
        }
        return nullptr;
    case GGML_OP_SCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_scale_f32;
        }
        return nullptr;
    case GGML_OP_SQR:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqr_f32;
        }
        return nullptr;
    case GGML_OP_SQRT:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqrt_f32;
        }
        return nullptr;
    case GGML_OP_SIN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sin_f32;
        }
        return nullptr;
    case GGML_OP_COS:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_cos_f32;
        }
        return nullptr;
    case GGML_OP_CLAMP:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_clamp_f32;
        }
        return nullptr;
    case GGML_OP_PAD:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_pad_f32;
        }
        return nullptr;
    case GGML_OP_ROLL:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_roll_f32;
        }
        return nullptr;
    case GGML_OP_REPEAT:
        if (ggml_type_size(src0->type) == sizeof(float) && ggml_type_size(dst->type) == sizeof(float)) {
            return ctx->device->pipeline_repeat_f32;
        }
        return nullptr;
    case GGML_OP_REPEAT_BACK:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_repeat_back_f32;
        }
        return nullptr;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        return ggml_vk_get_cpy_pipeline(ctx, src0, dst, dst->type);
    case GGML_OP_SET_ROWS:
        if (src1->type == GGML_TYPE_I64) {
            return ctx->device->pipeline_set_rows_i64[dst->type];
        } else {
            return ctx->device->pipeline_set_rows_i32[dst->type];
        }
    case GGML_OP_SILU_BACK:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_silu_back_f32;
        }
        return nullptr;
    case GGML_OP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_norm_f32;
        }
        return nullptr;
    case GGML_OP_GROUP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_group_norm_f32;
        }
        return nullptr;
    case GGML_OP_RMS_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            if (ctx->do_add_rms_partials) {
                return ctx->num_additional_fused_ops > 0 ? ctx->device->pipeline_rms_norm_mul_partials_f32 : ctx->device->pipeline_rms_norm_partials_f32;
            } else {
                return ctx->num_additional_fused_ops > 0 ? ctx->device->pipeline_rms_norm_mul_f32 : ctx->device->pipeline_rms_norm_f32;
            }
        }
        return nullptr;
    case GGML_OP_RMS_NORM_BACK:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rms_norm_back_f32;
        }
        return nullptr;
    case GGML_OP_L2_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_l2_norm_f32;
        }
        return nullptr;
    case GGML_OP_UNARY:
        if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) ||
            (dst->type  != GGML_TYPE_F32 && dst->type  != GGML_TYPE_F16) ||
            (src0->type != dst->type)) {
            return nullptr;
        }
        switch (ggml_get_unary_op(dst)) {
        case GGML_UNARY_OP_EXP:
            return ctx->device->pipeline_exp[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_SILU:
            return ctx->device->pipeline_silu[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_GELU:
            return ctx->device->pipeline_gelu[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_GELU_ERF:
            return ctx->device->pipeline_gelu_erf[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_GELU_QUICK:
            return ctx->device->pipeline_gelu_quick[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_RELU:
            return ctx->device->pipeline_relu[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_TANH:
            return ctx->device->pipeline_tanh[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_SIGMOID:
            return ctx->device->pipeline_sigmoid[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_HARDSIGMOID:
            return ctx->device->pipeline_hardsigmoid[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_HARDSWISH:
            return ctx->device->pipeline_hardswish[dst->type == GGML_TYPE_F16];
        default:
            break;
        }
        return nullptr;
    case GGML_OP_GLU:
        if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) ||
            (dst->type  != GGML_TYPE_F32 && dst->type  != GGML_TYPE_F16) ||
            (src0->type != dst->type)) {
            return nullptr;
        }
        switch (ggml_get_glu_op(dst)) {
        case GGML_GLU_OP_GEGLU:
            return ctx->device->pipeline_geglu[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_REGLU:
            return ctx->device->pipeline_reglu[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_SWIGLU:
            return ctx->device->pipeline_swiglu[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_SWIGLU_OAI:
            return ctx->device->pipeline_swiglu_oai[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_GEGLU_ERF:
            return ctx->device->pipeline_geglu_erf[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_GEGLU_QUICK:
            return ctx->device->pipeline_geglu_quick[dst->type == GGML_TYPE_F16];
        default:
            break;
        }
        return nullptr;
    case GGML_OP_DIAG_MASK_INF:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_diag_mask_inf_f32;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX:
        GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);
        GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32);

        if (ctx->num_additional_fused_ops) {
            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
            GGML_ASSERT(idx < num_topk_moe_pipelines);
            topk_moe_mode mode = ggml_vk_num_additional_ops_to_topk_moe_mode(ctx->num_additional_fused_ops);
            return ctx->device->pipeline_topk_moe[idx][mode];
        }

        if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
            return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_wg512 : ctx->device->pipeline_soft_max_f32;
        }
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_f16_wg512 : ctx->device->pipeline_soft_max_f32_f16;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX_BACK:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_back_f32;
        }
        return nullptr;
    case GGML_OP_ROPE:
    case GGML_OP_ROPE_BACK:
        {
            const ggml_tensor *rope = ctx->num_additional_fused_ops == 2 ? dst->src[0]->src[0] : dst;
            const int mode = ((const int32_t *) rope->op_params)[2];
            const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
            const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE;
            const bool is_vision = mode == GGML_ROPE_TYPE_VISION;

            if (is_neox) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_neox_f32;
                }
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f32_f16;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f16;
                }
            } else if (is_mrope && !is_vision) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_multi_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_multi_f16;
                }
            } else if (is_vision) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_vision_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_vision_f16;
                }
            } else {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_norm_f32;
                }
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f32_f16;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f16;
                }
            }
            return nullptr;
        }
    case GGML_OP_ARGSORT:
        if (ctx->num_additional_fused_ops) {
            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
            GGML_ASSERT(idx < num_topk_moe_pipelines);
            topk_moe_mode mode = ggml_vk_num_additional_ops_to_topk_moe_mode(ctx->num_additional_fused_ops);
            return ctx->device->pipeline_topk_moe[idx][mode];
        }
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
            return ctx->device->pipeline_argsort_f32[idx];
        }
        return nullptr;
    case GGML_OP_SUM:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_MEAN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sum_rows_f32;
        }
        return nullptr;
    case GGML_OP_ARGMAX:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_argmax_f32;
        }
        return nullptr;
    case GGML_OP_COUNT_EQUAL:
        if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I64) {
            return ctx->device->pipeline_count_equal_i32;
        }
        return nullptr;
    case GGML_OP_IM2COL:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_im2col_f32;
        }
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_im2col_f32_f16;
        }
        return nullptr;
    case GGML_OP_IM2COL_3D:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_im2col_3d_f32;
        }
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_im2col_3d_f32_f16;
        }
        return nullptr;
    case GGML_OP_TIMESTEP_EMBEDDING:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_timestep_embedding_f32;
        }
        return nullptr;
    case GGML_OP_CONV_TRANSPOSE_1D:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_conv_transpose_1d_f32;
        }
        return nullptr;
    case GGML_OP_POOL_2D:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_pool2d_f32;
        }
        return nullptr;
    case GGML_OP_RWKV_WKV6:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rwkv_wkv6_f32;
        }
        return nullptr;
    case GGML_OP_RWKV_WKV7:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rwkv_wkv7_f32;
        }
        return nullptr;
    case GGML_OP_SSM_SCAN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            const uint32_t d_state = src0->ne[0];
            if (d_state == 128) {
                return ctx->device->pipeline_ssm_scan_f32_d128;
            } else if (d_state == 256) {
                return ctx->device->pipeline_ssm_scan_f32_d256;
            }
        }
        return nullptr;
    case GGML_OP_SSM_CONV:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_ssm_conv_f32;
        }
        return nullptr;
    case GGML_OP_OPT_STEP_ADAMW:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_opt_step_adamw_f32;
        }
        return nullptr;
    case GGML_OP_OPT_STEP_SGD:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_opt_step_sgd_f32;
        }
        return nullptr;
    case GGML_OP_LEAKY_RELU:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_leaky_relu_f32;
        }
        return nullptr;
    case GGML_OP_CONV_2D:
    case GGML_OP_CONV_TRANSPOSE_2D:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 &&
            ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) {
            std::array<uint32_t, 3> elements;
            if (op == GGML_OP_CONV_2D) {
                elements = ggml_vk_get_conv_elements(dst);
            } else if (op == GGML_OP_CONV_TRANSPOSE_2D) {
                elements = ggml_vk_get_conv_transpose_2d_elements(dst);
            }

            vk_conv_shapes shape;

            uint32_t tiles[CONV_SHAPE_COUNT];
            for (uint32_t i = 0; i < CONV_SHAPE_COUNT; ++i) {
                tiles[i] = CEIL_DIV(elements[0], ctx->device->pipeline_conv2d_f32[i]->wg_denoms[0]) * CEIL_DIV(elements[1], ctx->device->pipeline_conv2d_f32[i]->wg_denoms[1]);
            }

            // We can't query the number of shader cores on Intel, use 32 as a placeholder
            // so small convolutions will still choose a smaller tile.
            const uint32_t shader_core_count = ctx->device->shader_core_count > 0 ? ctx->device->shader_core_count : 32;

            if (elements[0] > 64 && tiles[CONV_SHAPE_128x128] >= shader_core_count * 2) {
                shape = CONV_SHAPE_128x128;
            } else if (elements[0] <= 32 && tiles[CONV_SHAPE_32x256] >= shader_core_count * 2) {
                shape = CONV_SHAPE_32x256;
            } else {
                shape = CONV_SHAPE_64x32;
            }

            if (op == GGML_OP_CONV_2D) {
                if (src0->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_conv2d_f32[shape];
                } else if (src0->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_conv2d_f16_f32[shape];
                }
            } else if (op == GGML_OP_CONV_TRANSPOSE_2D) {
                if (src0->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_conv_transpose_2d_f32[shape];
                } else if (src0->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_conv_transpose_2d_f16_f32[shape];
                }
            }
        }
        return nullptr;
    case GGML_OP_CONV_2D_DW:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            if (ggml_is_contiguous(src1)) {
                return ctx->device->pipeline_conv2d_dw_whcn_f32;
            } else if (ggml_is_contiguous_channels(src1)) {
                return ctx->device->pipeline_conv2d_dw_cwhn_f32;
            }
        } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            if (ggml_is_contiguous(src1)) {
                return ctx->device->pipeline_conv2d_dw_whcn_f16_f32;
            } else if (ggml_is_contiguous_channels(src1)) {
                return ctx->device->pipeline_conv2d_dw_cwhn_f16_f32;
            }
        }
        return nullptr;
    default:
        return nullptr;
    }

    GGML_UNUSED(src2);
}
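
// Ops listed here index through per-dimension strides passed in push constants,
// so their shaders tolerate arbitrary (non-contiguous) tensor layouts; all
// other ops require dim0/dim1-contiguous operands (see the asserts in
// ggml_vk_op_f32).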
static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
    switch (op) {
    case GGML_OP_CPY:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_SUB:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_ADD_ID:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SQR:
    case GGML_OP_SQRT:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_REPEAT:
    case GGML_OP_REPEAT_BACK:
    case GGML_OP_ROPE:
    case GGML_OP_RMS_NORM:
    case GGML_OP_CONV_2D_DW:
    case GGML_OP_IM2COL:
    case GGML_OP_IM2COL_3D:
    case GGML_OP_SET_ROWS:
    case GGML_OP_SUM:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_MEAN:
        return true;
    default:
        return false;
    }
}
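
// Number of bytes by which a tensor's base falls short of the device's
// minStorageBufferOffsetAlignment. Descriptor offsets get rounded down to that
// alignment, and this remainder is handed to the shader as an element offset.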
static uint32_t get_misalign_bytes(const ggml_backend_vk_context * ctx, const ggml_tensor * t)
{
    return ((vk_tensor_offset(t) + t->view_offs) & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1));
}
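
// Record the misalignment (in elements) in the op's push constants. The generic
// template asserts that ops without misalignment support only ever see aligned
// tensors; the specializations below record per-operand offsets, most of them
// packed into a single 32-bit misalign_offsets word.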
template <typename T> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, T &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    GGML_UNUSED(p);
    GGML_UNUSED(src0);
    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
    GGML_UNUSED(dst);
    static_assert(!std::is_const<T>::value, "unexpected type");
    GGML_ASSERT(!src0 || get_misalign_bytes(ctx, src0) == 0);
    GGML_ASSERT(!src1 || get_misalign_bytes(ctx, src1) == 0);
    GGML_ASSERT(!src2 || get_misalign_bytes(ctx, src2) == 0);
    GGML_ASSERT(!src3 || get_misalign_bytes(ctx, src3) == 0);
    GGML_ASSERT(!dst  || get_misalign_bytes(ctx, dst)  == 0);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_unary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_sum_rows_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_pad_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_im2col_3d_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src0);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_binary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    GGML_ASSERT(dst->op != GGML_OP_GET_ROWS || (a_offset == 0 && b_offset == 0 && d_offset == 0));

    p.misalign_offsets = (a_offset << 16) | (b_offset << 8) | d_offset;

    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_upscale_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.a_offset = a_offset;
    p.d_offset = d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}
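
// Generic dispatcher for the "simple" (non-matmul) ops: look up the pipeline,
// resolve device buffers (or UMA host allocations) for up to four sources and
// the destination, derive the workgroup grid from the op, and record the
// dispatch. In dryrun mode only the descriptor sets are requested.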
template<typename PC>
static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst, ggml_op op, PC&& pc, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    if (src1 != nullptr) {
        std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    }
    if (src2 != nullptr) {
        std::cerr << "), (" << src2 << ", name=" << src2->name << ", type=" << src2->type << ", ne0=" << src2->ne[0] << ", ne1=" << src2->ne[1] << ", ne2=" << src2->ne[2] << ", ne3=" << src2->ne[3] << ", nb0=" << src2->nb[0] << ", nb1=" << src2->nb[1] << ", nb2=" << src2->nb[2] << ", nb3=" << src2->nb[3];
    }
    if (src3 != nullptr) {
        std::cerr << "), (" << src3 << ", name=" << src3->name << ", type=" << src3->type << ", ne0=" << src3->ne[0] << ", ne1=" << src3->ne[1] << ", ne2=" << src3->ne[2] << ", ne3=" << src3->ne[3] << ", nb0=" << src3->nb[0] << ", nb1=" << src3->nb[1] << ", nb2=" << src3->nb[2] << ", nb3=" << src3->nb[3];
    }
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << ggml_op_name(op) << ", " << (dryrun ? "dryrun" : "") << ")");
    GGML_ASSERT(op == GGML_OP_GET_ROWS || op == GGML_OP_CPY || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type))));  // NOLINT
    GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0));  // NOLINT
    GGML_ASSERT(dst->buffer != nullptr);
    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];
    const uint64_t ne0  = ne00 * ne01;

    const bool use_src1 = src1 != nullptr;
    const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const uint64_t ne1  = ne10 * ne11;
    // const uint64_t nb10 = use_src1 ? src1->nb[0] : 0;

    const bool use_src2 = src2 != nullptr;
    const uint64_t ne20 = use_src2 ? src2->ne[0] : 0;
    const uint64_t ne21 = use_src2 ? src2->ne[1] : 0;
    const uint64_t ne22 = use_src2 ? src2->ne[2] : 0;
    const uint64_t ne23 = use_src2 ? src2->ne[3] : 0;
    const uint64_t ne2  = ne20 * ne21;

    const bool use_src3 = src3 != nullptr;
    const uint64_t ne30 = use_src3 ? src3->ne[0] : 0;
    const uint64_t ne31 = use_src3 ? src3->ne[1] : 0;
    const uint64_t ne32 = use_src3 ? src3->ne[2] : 0;
    const uint64_t ne33 = use_src3 ? src3->ne[3] : 0;
    const uint64_t ne3  = ne30 * ne31;

    const uint64_t ned0 = dst->ne[0];
    const uint64_t ned1 = dst->ne[1];
    const uint64_t ned2 = dst->ne[2];
    const uint64_t ned3 = dst->ne[3];
    const uint64_t ned  = ned0 * ned1;

    init_pushconst_fastdiv(pc);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);

    if (pipeline == nullptr) {
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
        if (src1 != nullptr) {
            std::cerr << " and " << ggml_type_name(src1->type);
        }
        std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
        GGML_ABORT("fatal error");
    }

    if (dryrun) {
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        return;
    }

    const bool op_supports_incontiguous = ggml_vk_op_supports_incontiguous(op);

    ggml_backend_vk_buffer_context * dst_buf_ctx  = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = use_src1 ? (ggml_backend_vk_buffer_context *)src1->buffer->context : nullptr;
    ggml_backend_vk_buffer_context * src2_buf_ctx = use_src2 ? (ggml_backend_vk_buffer_context *)src2->buffer->context : nullptr;
    ggml_backend_vk_buffer_context * src3_buf_ctx = use_src3 ? (ggml_backend_vk_buffer_context *)src3->buffer->context : nullptr;

    vk_buffer d_X = nullptr;
    size_t x_buf_offset = 0;
    vk_buffer d_Y = nullptr;
    size_t y_buf_offset = 0;
    vk_buffer d_Z = nullptr;
    size_t z_buf_offset = 0;
    vk_buffer d_W = nullptr;
    size_t w_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool src2_uma = false;
    bool src3_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_X, x_buf_offset);
        src0_uma = d_X != nullptr;
        if (use_src1) {
            ggml_vk_host_get(ctx->device, src1->data, d_Y, y_buf_offset);
            src1_uma = d_Y != nullptr;
        }
        if (use_src2) {
            ggml_vk_host_get(ctx->device, src2->data, d_Z, z_buf_offset);
            src2_uma = d_Z != nullptr;
        }
        if (use_src3) {
            ggml_vk_host_get(ctx->device, src3->data, d_W, w_buf_offset);
            src3_uma = d_W != nullptr;
        }
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    GGML_ASSERT(d_D != nullptr);
    uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    if (!src0_uma) {
        d_X = src0_buf_ctx->dev_buffer;
        x_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_X != nullptr);
    }
    if (use_src1 && !src1_uma) {
        d_Y = src1_buf_ctx->dev_buffer;
        y_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Y != nullptr);
    }
    if (use_src2 && !src2_uma) {
        d_Z = src2_buf_ctx->dev_buffer;
        z_buf_offset = vk_tensor_offset(src2) + src2->view_offs;
        GGML_ASSERT(d_Z != nullptr);
    }
    if (use_src3 && !src3_uma) {
        d_W = src3_buf_ctx->dev_buffer;
        w_buf_offset = vk_tensor_offset(src3) + src3->view_offs;
        GGML_ASSERT(d_W != nullptr);
    }
    // Compute the misalignment offset for the descriptors, store it in push constants, then align the descriptor offsets.
    init_pushconst_tensor_offsets(ctx, pc, src0, src1, src2, src3, dst);
    x_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
    y_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
    z_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
    w_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
    d_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);

    std::array<uint32_t, 3> elements;

    // Single call if dimension 2 is contiguous
    GGML_ASSERT(op_supports_incontiguous || (ggml_is_contiguous(src0) && (src1 == nullptr || ggml_is_contiguous(src1))));
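
    // Pick the global work size per op. Most elementwise ops flatten the tensor
    // and split it into at most 512x512xZ workgroups; reductions and the more
    // structured ops get bespoke grids.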
    switch (op) {
    case GGML_OP_NORM:
    case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_L2_NORM:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_MEAN:
    case GGML_OP_ARGMAX:
        {
            const uint32_t nr = ggml_nrows(src0);
            if (nr > 262144) {
                elements = { 512, 512, CEIL_DIV(nr, 262144) };
            } else if (nr > 512) {
                elements = { 512, CEIL_DIV(nr, 512), 1 };
            } else {
                elements = { nr, 1, 1 };
            }
        } break;
    case GGML_OP_RMS_NORM:
        if (ctx->do_add_rms_partials) {
            // Run one element per thread, 128 threads per workgroup
            elements = { (uint32_t)CEIL_DIV(ne00, 128), 1, 1 };
        } else {
            elements = { (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne03 };
        }
        break;
    case GGML_OP_SUM:
        // We use GGML_OP_SUM_ROWS with 1 row.
        elements = { 1, 1, 1 };
        break;
    case GGML_OP_GROUP_NORM:
        {
            const uint32_t num_groups = dst->op_params[0];
            elements = { num_groups * (uint32_t)src0->ne[3], 1, 1 };
        } break;
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_ROPE:
    case GGML_OP_ROPE_BACK:
        elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
        break;
    case GGML_OP_GET_ROWS:
        elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
        elements[1] = std::min(elements[1], ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
        elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
        break;
    case GGML_OP_ARGSORT:
        elements = { (uint32_t)ne00, (uint32_t)ggml_nrows(src0), 1 };
        elements[1] = std::min(elements[1], ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
        break;
    case GGML_OP_IM2COL:
        {
            const bool is_2D = dst->op_params[6] == 1;

            const uint32_t IC = src1->ne[is_2D ? 2 : 1];

            const uint32_t KH = is_2D ? src0->ne[1] : 1;
            const uint32_t KW = src0->ne[0];

            const uint32_t OH = is_2D ? dst->ne[2] : 1;
            const uint32_t OW = dst->ne[1];

            const uint32_t batch = src1->ne[is_2D ? 3 : 2];

            elements = { OW * KW * KH, OH, batch * IC };
        } break;
    case GGML_OP_IM2COL_3D:
        {
            const uint32_t IC = ((const uint32_t *)(dst->op_params))[9];

            const uint32_t N = ne13 / IC;

            const uint32_t KD = ne02;
            const uint32_t KH = ne01;
            const uint32_t KW = ne00;

            const uint32_t OD = ned3 / N;
            const uint32_t OH = ned2;
            const uint32_t OW = ned1;

            const uint32_t IC_KD_KH_KW = IC*KD*KH*KW;
            const uint32_t N_OD_OH = N*OD*OH;

            elements = { IC_KD_KH_KW, OW, N_OD_OH };
            elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
        } break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        {
            const uint32_t dim = dst->op_params[0];
            uint32_t half_ceil = (dim + 1) / 2;
            elements = { half_ceil, (uint32_t)src0->ne[0], 1 };
        } break;
    case GGML_OP_CONV_TRANSPOSE_1D:
        {
            elements = { uint32_t(src0->ne[1]), 1, 1 }; // parallelize in {Cout, 1, 1}
        } break;
    case GGML_OP_POOL_2D:
        {
            const uint32_t N  = dst->ne[3];
            const uint32_t OC = dst->ne[2];
            const uint32_t OH = dst->ne[1];
            const uint32_t OW = dst->ne[0];
            elements = { N * OC * OH * OW, 1, 1 };
        } break;
    case GGML_OP_CONV_2D:
        {
            elements = ggml_vk_get_conv_elements(dst);
        } break;
    case GGML_OP_CONV_TRANSPOSE_2D:
        {
            elements = ggml_vk_get_conv_transpose_2d_elements(dst);
        } break;
    case GGML_OP_ADD:
    case GGML_OP_SUB:
    case GGML_OP_DIV:
    case GGML_OP_MUL:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SQRT:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_ROLL:
    case GGML_OP_REPEAT:
    case GGML_OP_REPEAT_BACK:
    case GGML_OP_CPY:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_UNARY:
    case GGML_OP_GLU:
    case GGML_OP_CONV_2D_DW:
        {
            uint32_t ne = ggml_nelements(dst);
            if (op == GGML_OP_CPY && ggml_is_quantized(src0->type) && ggml_is_quantized(dst->type)) {
                // Convert from number of logical elements to 2- or 4-byte units.
                ne /= ggml_blck_size(src0->type);
                if ((ggml_type_size(src0->type) % 4) == 0) {
                    ne *= ggml_type_size(src0->type) / 4;
                } else {
                    ne *= ggml_type_size(src0->type) / 2;
                }
            }
            // copy_to_quant has a block size of 32, and each thread does QUANT_K elements.
            // Splitting into 512x512xZ wouldn't work well since each workgroup does 1024 elements.
            // So divide by the block size here before splitting into 512x512 groups.
            if (op == GGML_OP_CPY && !ggml_is_quantized(src0->type) && ggml_is_quantized(dst->type)) {
                ne = CEIL_DIV(ne, ggml_blck_size(dst->type));
            }

            if (ne > 262144) {
                elements = { 512, 512, CEIL_DIV(ne, 262144) };
            } else if (ne > 512) {
                elements = { 512, CEIL_DIV(ne, 512), 1 };
            } else {
                elements = { ne, 1, 1 };
            }
        } break;
    case GGML_OP_ADD_ID:
        {
            elements = { (uint32_t)ne01, (uint32_t)ne02, 1 };
        } break;
    case GGML_OP_SET_ROWS:
        {
            uint32_t ne = ggml_nelements(src0);
            if (ggml_is_quantized(dst->type)) {
                // quants run 32 threads each doing QUANT_K elements
                ne = CEIL_DIV(ne, 32 * ggml_blck_size(dst->type));
            } else {
                // scalar types do one element per thread, running 512 threads
                ne = CEIL_DIV(ne, 512);
            }
            if (ne > 262144) {
                elements = { 512, 512, CEIL_DIV(ne, 262144) };
            } else if (ne > 512) {
                elements = { 512, CEIL_DIV(ne, 512), 1 };
            } else {
                elements = { ne, 1, 1 };
            }
        } break;
    case GGML_OP_SSM_CONV:
        {
            const uint32_t nr  = src0->ne[1];
            const uint32_t n_t = dst->ne[1];
            const uint32_t n_s = dst->ne[2];
            elements = { nr, n_t, n_s };
        } break;
    default:
        elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
        break;
    }

    uint64_t x_sz, y_sz, z_sz, w_sz, d_sz;

    if (op_supports_incontiguous) {
        x_sz = ggml_nbytes(src0) + get_misalign_bytes(ctx, src0);
        y_sz = use_src1 ? ggml_nbytes(src1) + get_misalign_bytes(ctx, src1) : 0;
        z_sz = use_src2 ? ggml_nbytes(src2) + get_misalign_bytes(ctx, src2) : 0;
        w_sz = use_src3 ? ggml_nbytes(src3) + get_misalign_bytes(ctx, src3) : 0;
        d_sz = ggml_nbytes(dst) + get_misalign_bytes(ctx, dst);

        if (x_buf_offset + x_sz >= d_X->size) {
            x_sz = ggml_vk_get_max_buffer_range(ctx, d_X, x_buf_offset);
        }
        if (use_src1 && y_buf_offset + y_sz >= d_Y->size) {
            y_sz = ggml_vk_get_max_buffer_range(ctx, d_Y, y_buf_offset);
        }
        if (use_src2 && z_buf_offset + z_sz >= d_Z->size) {
            z_sz = ggml_vk_get_max_buffer_range(ctx, d_Z, z_buf_offset);
        }
        if (use_src3 && w_buf_offset + w_sz >= d_W->size) {
            w_sz = ggml_vk_get_max_buffer_range(ctx, d_W, w_buf_offset);
        }
        if (d_buf_offset + d_sz >= d_D->size) {
            d_sz = ggml_vk_get_max_buffer_range(ctx, d_D, d_buf_offset);
        }
    } else {
        x_sz = ggml_type_size(src0->type)/ggml_blck_size(src0->type) * ne0 * ne02 * ne03;
        y_sz = use_src1 ? ggml_type_size(src1->type) * ne1 * ne12 * ne13 : 0;
        z_sz = use_src2 ? ggml_type_size(src2->type) * ne2 * ne22 * ne23 : 0;
        w_sz = use_src3 ? ggml_type_size(src3->type) * ne3 * ne32 * ne33 : 0;
        d_sz = ggml_type_size(dst->type) * ned * ned2 * ned3;
    }
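
    // Dispatch with the buffer bindings each shader family expects. Every
    // declared binding must be valid, so absent optional operands are aliased
    // to the src0 buffer (d_X).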
    if (op == GGML_OP_ADD || op == GGML_OP_RMS_NORM) {
        vk_buffer d_A = ctx->do_add_rms_partials ? ctx->prealloc_add_rms_partials : d_X;
        size_t a_buf_offset = ctx->do_add_rms_partials ? ctx->prealloc_size_add_rms_partials_offset : 0;
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
            { vk_subbuffer{ d_X, x_buf_offset, x_sz },
              vk_subbuffer{ d_Y, y_buf_offset, y_sz },
              vk_subbuffer{ d_D, d_buf_offset, d_sz },
              ggml_vk_subbuffer(ctx, d_A, a_buf_offset),
            }, pc, elements);
    } else if (op == GGML_OP_GLU) {
        // An empty src1 is possible in glu, but the shader needs a buffer
        vk_subbuffer subbuf_y;
        if (use_src1) {
            subbuf_y = { d_Y, y_buf_offset, y_sz };
        } else {
            subbuf_y = { d_X, 0, x_sz };
        }
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, subbuf_y, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else if (op == GGML_OP_SOFT_MAX) {
        // Empty src1 and src2 are possible in soft_max, but the shader needs a buffer
        vk_subbuffer subbuf_y;
        if (use_src1) {
            subbuf_y = { d_Y, y_buf_offset, y_sz };
        } else {
            subbuf_y = { d_X, 0, x_sz };
        }

        vk_subbuffer subbuf_z;
        if (use_src2) {
            subbuf_z = { d_Z, z_buf_offset, z_sz };
        } else {
            subbuf_z = { d_X, 0, x_sz };
        }

        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, subbuf_y, subbuf_z, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else if (op == GGML_OP_ROPE || op == GGML_OP_ROPE_BACK) {
        // An empty src2 is possible in rope, but the shader needs a buffer
        vk_subbuffer subbuf_z, subbuf_w;
        if (use_src2) {
            subbuf_z = { d_Z, z_buf_offset, z_sz };
        } else {
            subbuf_z = { d_X, 0, x_sz };
        }
        if (use_src3) {
            subbuf_w = { d_W, w_buf_offset, w_sz };
        } else {
            subbuf_w = { d_X, 0, x_sz };
        }

        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, subbuf_z, vk_subbuffer{ d_D, d_buf_offset, d_sz }, subbuf_w }, pc, elements);
    } else if (op == GGML_OP_IM2COL || op == GGML_OP_IM2COL_3D) {
        if (ctx->device->shader_int64 && ctx->device->buffer_device_address) {
            // the buffer device address path doesn't use the dst buffer
            d_sz = 1;
        }
        // im2col uses only the src1 and dst buffers
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else if (op == GGML_OP_COUNT_EQUAL) {
        // count_equal assumes that the destination buffer is initialized with zeroes
        ggml_vk_buffer_memset_async(subctx, d_D, d_buf_offset, 0, d_sz);
        ggml_vk_sync_buffers(ctx, subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else if (op == GGML_OP_OPT_STEP_SGD) {
        // OPT_STEP_SGD works on src0, it does not need dst
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz } }, pc, elements);
    } else if (use_src3) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz }, vk_subbuffer{ d_W, w_buf_offset, w_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else if (use_src2) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else if (use_src1) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    } else {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, pc, elements);
    }
}
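
// The wrappers below pack each op's shapes and strides (in units of elements)
// into its push-constant block and defer to ggml_vk_op_f32.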
static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size  = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_GET_ROWS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}
static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size  = ggml_type_size(dst->type);

    int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
    int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
    // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
    int offset = dst->op_params[3] / 4; // op_params[3] is in bytes; convert to float32 elements

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_ACC, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, offset,
    }, dryrun);
}
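
// Fused chain of consecutive ADD nodes (optionally feeding RMS_NORM partial
// sums): collect the distinct source tensors of the chain and run a single
// multi_add dispatch over all of them. The result tensor is the last entry in
// the tensors[] list.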
  7968. static void ggml_vk_multi_add(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
  7969. const ggml_tensor *first_node = cgraph->nodes[node_idx];
  7970. const ggml_tensor *dst = cgraph->nodes[node_idx + ctx->num_additional_fused_ops];
  7971. // Make a list of all the tensors used by the op.
  7972. // Last element of the list is the dest tensor.
  7973. const ggml_tensor *tensors[MAX_PARAMETER_COUNT];
  7974. uint32_t num_srcs = ctx->num_additional_fused_ops + 2;
  7975. uint32_t num_tensors = num_srcs + 1;
  7976. GGML_ASSERT(num_tensors + ctx->do_add_rms_partials <= MAX_PARAMETER_COUNT);
  7977. tensors[0] = first_node->src[0];
  7978. tensors[1] = first_node->src[1];
  7979. for (int32_t i = 0; i < ctx->num_additional_fused_ops; ++i) {
  7980. // check whether the previous result is src[0] or src[1]
  7981. if (cgraph->nodes[node_idx + i] == cgraph->nodes[node_idx + i + 1]->src[0]) {
  7982. tensors[i+2] = cgraph->nodes[node_idx + i + 1]->src[1];
  7983. } else {
  7984. tensors[i+2] = cgraph->nodes[node_idx + i + 1]->src[0];
  7985. }
  7986. }
  7987. tensors[num_srcs] = dst;
  7988. vk_op_multi_add_push_constants pc;
  7989. pc.ne20 = (uint32_t)dst->ne[0];
  7990. pc.ne21 = (uint32_t)dst->ne[1];
  7991. pc.ne22 = (uint32_t)dst->ne[2];
  7992. pc.ne23 = (uint32_t)dst->ne[3];
  7993. for (uint32_t i = 0; i < num_tensors; ++i) {
  7994. const ggml_tensor *t = tensors[i];
  7995. pc.nb[i][0] = (uint32_t)t->nb[0] / sizeof(float);
  7996. pc.nb[i][1] = (uint32_t)t->nb[1] / sizeof(float);
  7997. pc.nb[i][2] = (uint32_t)t->nb[2] / sizeof(float);
  7998. pc.nb[i][3] = (uint32_t)t->nb[3] / sizeof(float);
  7999. }
  8000. pc.rms_partials = ctx->do_add_rms_partials;
  8001. vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, tensors[0], tensors[1], nullptr, dst, dst->op);
  8002. if (pipeline == nullptr) {
  8003. std::cerr << "ggml_vulkan: Error: Missing multi_add";
  8004. GGML_ABORT("fatal error");
  8005. }
  8006. if (dryrun) {
  8007. ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
  8008. return;
  8009. }
  8010. ggml_backend_vk_buffer_context * buf_ctx[MAX_PARAMETER_COUNT];
  8011. vk_buffer buf[MAX_PARAMETER_COUNT];
  8012. size_t offset[MAX_PARAMETER_COUNT];
  8013. bool uma[MAX_PARAMETER_COUNT];
  8014. for (uint32_t i = 0; i < num_tensors; ++i) {
  8015. buf_ctx[i] = (ggml_backend_vk_buffer_context *)tensors[i]->buffer->context;
  8016. buf[i] = nullptr;
  8017. offset[i] = 0;
  8018. uma[i] = false;
  8019. if (ctx->device->uma) {
  8020. ggml_vk_host_get(ctx->device, tensors[i]->data, buf[i], offset[i]);
  8021. uma[i] = buf[i] != nullptr;
  8022. }
  8023. if (!uma[i]) {
  8024. buf[i] = buf_ctx[i]->dev_buffer;
  8025. offset[i] = vk_tensor_offset(tensors[i]) + tensors[i]->view_offs;
  8026. }
  8027. GGML_ASSERT(buf[i] != nullptr);
  8028. }
  8029. // If any remaining descriptors are unused, just point them at src[0]
  8030. for (uint32_t i = num_tensors; i < MAX_PARAMETER_COUNT; ++i) {
  8031. buf[i] = buf[0];
  8032. offset[i] = 0;
  8033. }
  8034. if (ctx->do_add_rms_partials) {
  8035. buf[num_tensors] = ctx->prealloc_add_rms_partials;
  8036. offset[num_tensors] = ctx->prealloc_size_add_rms_partials_offset;
  8037. }
    std::array<uint32_t, 3> elements;

    uint32_t ne = ggml_nelements(dst);
    if (ne > 262144) {
        elements = { 512, 512, CEIL_DIV(ne, 262144) };
    } else if (ne > 512) {
        elements = { 512, CEIL_DIV(ne, 512), 1 };
    } else {
        elements = { ne, 1, 1 };
    }

    static_assert(MAX_PARAMETER_COUNT == 12);
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
        {
            ggml_vk_subbuffer(ctx, buf[0], offset[0]),
            ggml_vk_subbuffer(ctx, buf[1], offset[1]),
            ggml_vk_subbuffer(ctx, buf[2], offset[2]),
            ggml_vk_subbuffer(ctx, buf[3], offset[3]),
            ggml_vk_subbuffer(ctx, buf[4], offset[4]),
            ggml_vk_subbuffer(ctx, buf[5], offset[5]),
            ggml_vk_subbuffer(ctx, buf[6], offset[6]),
            ggml_vk_subbuffer(ctx, buf[7], offset[7]),
            ggml_vk_subbuffer(ctx, buf[8], offset[8]),
            ggml_vk_subbuffer(ctx, buf[9], offset[9]),
            ggml_vk_subbuffer(ctx, buf[10], offset[10]),
            ggml_vk_subbuffer(ctx, buf[11], offset[11]),
        }, pc, elements);
}
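
// The element-wise binary ops below (ADD/SUB/MUL/DIV) share the
// vk_op_binary_push_constants layout: ne/nb for src0, src1 and dst, with each
// byte stride divided by its tensor's element size so the shader indexes in
// elements rather than bytes. The trailing slot is an op-specific parameter;
// ADD passes ctx->do_add_rms_partials there, which presumably asks the shader
// to also emit the partial sums consumed by a following fused RMS_NORM.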
static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_ADD, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, ctx->do_add_rms_partials,
    }, dryrun);
}

static void ggml_vk_sub(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SUB, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}

static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_MUL, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}

static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_DIV, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}

static void ggml_vk_add_id(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t src2_type_size = ggml_type_size(src2->type);

    ggml_vk_op_f32<vk_op_add_id_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_ADD_ID, {
        (uint32_t)dst->ne[0],
        (uint32_t)dst->ne[1],
        (uint32_t)src0->nb[1] / src0_type_size,
        (uint32_t)src0->nb[2] / src0_type_size,
        (uint32_t)src1->nb[1] / src1_type_size,
        (uint32_t)src2->nb[1] / src2_type_size,
    }, dryrun);
}
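
// Shared dispatch helper for the RWKV wkv ops: version 6 binds six source
// tensors and version 7 binds seven, each followed by the destination, with
// one workgroup per (batch, head) pair (B * H in x).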
static void ggml_vk_op_f32_wkv(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_rwkv_wkv6_push_constants&& pc, int version, bool dryrun = false) {
    GGML_ASSERT(version == 6 || version == 7);
    int num_srcs = version == 6 ? 6 : 7;

    for (int i = 0; i < num_srcs; i++) {
        GGML_ASSERT(!ggml_is_quantized(dst->src[i]->type));
    }

    GGML_ASSERT(dst->buffer != nullptr);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, dst->src[0], dst->src[1], dst->src[2], dst, dst->op);
    GGML_ASSERT(pipeline != nullptr);

    if (dryrun) {
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        return;
    }

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src_buf_ctxs[7] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
    for (int i = 0; i < num_srcs; i++) {
        src_buf_ctxs[i] = (ggml_backend_vk_buffer_context *)dst->src[i]->buffer->context;
    }

    vk_buffer d_D = nullptr, d_srcs[7] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
    size_t dst_offset = 0, src_offsets[7] = { 0, 0, 0, 0, 0, 0, 0 };
    bool dst_uma = false, srcs_uma[7] = { false, false, false, false, false, false, false };

    if (ctx->device->uma) {
        for (int i = 0; i < num_srcs; i++) {
            ggml_vk_host_get(ctx->device, dst->src[i]->data, d_srcs[i], src_offsets[i]);
            srcs_uma[i] = d_srcs[i] != nullptr;
        }
        ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset);
        dst_uma = d_D != nullptr;
    }

    uint64_t src_sizes[7] = { 0, 0, 0, 0, 0, 0, 0 };
    for (int i = 0; i < num_srcs; i++) {
        src_sizes[i] = ggml_nbytes(dst->src[i]);
        if (!srcs_uma[i]) {
            d_srcs[i] = src_buf_ctxs[i]->dev_buffer;
            src_offsets[i] = vk_tensor_offset(dst->src[i]) + dst->src[i]->view_offs;
        }
    }

    const uint64_t dst_size = ggml_nbytes(dst);
    if (!dst_uma) {
        d_D = dst_buf_ctx->dev_buffer;
        dst_offset = vk_tensor_offset(dst) + dst->view_offs;
    }

    std::array<uint32_t, 3> elements = {
        (uint32_t)(pc.B * pc.H),
        1,
        1
    };

    if (version == 6) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
            vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] },
            vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] },
            vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] },
            vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] },
            vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] },
            vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] },
            vk_subbuffer{ d_D, dst_offset, dst_size }
        }, pc, elements);
    } else if (version == 7) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
            vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] },
            vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] },
            vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] },
            vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] },
            vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] },
            vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] },
            vk_subbuffer{ d_srcs[6], src_offsets[6], src_sizes[6] },
            vk_subbuffer{ d_D, dst_offset, dst_size }
        }, pc, elements);
    } else {
        // shouldn't happen
        GGML_ASSERT(false);
    }
}

static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
    const size_t seq_length = dst->src[0]->ne[2];
    const size_t n_embed = dst->ne[0];
    const size_t n_heads = dst->src[0]->ne[1];
    const size_t n_seqs = dst->src[5]->ne[1];

    ggml_vk_op_f32_wkv(
        ctx, subctx, dst,
        {
            (uint32_t)n_seqs,
            (uint32_t)seq_length,
            (uint32_t)n_embed,
            (uint32_t)n_heads,
        },
        6,
        dryrun
    );
}

static void ggml_vk_rwkv_wkv7(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
    const size_t seq_length = dst->src[0]->ne[2];
    const size_t n_embed = dst->ne[0];
    const size_t n_heads = dst->src[0]->ne[1];
    const size_t n_seqs = dst->src[6]->ne[1];

    ggml_vk_op_f32_wkv(
        ctx, subctx, dst,
        {
            (uint32_t)n_seqs,
            (uint32_t)seq_length,
            (uint32_t)n_embed,
            (uint32_t)n_heads,
        },
        7,
        dryrun
    );
}
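
// SSM scan for the Mamba-2 layout (the only one supported here, detected by
// src3 having a float-sized row stride). Geometry comes from the sources:
// head_dim from src0->ne[1], n_head/n_tok/n_seq from src1, n_group from
// src4->ne[1]; s_off is the byte offset of the state region within dst, which
// presumably packs the y output followed by the updated states.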
static void ggml_vk_ssm_scan(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    const ggml_tensor * src2 = dst->src[2];
    const ggml_tensor * src3 = dst->src[3];
    const ggml_tensor * src4 = dst->src[4];
    const ggml_tensor * src5 = dst->src[5];

    GGML_ASSERT(dst->buffer != nullptr);

    const uint32_t head_dim = src0->ne[1];
    const uint32_t n_head = src1->ne[1];
    const uint32_t n_group = src4->ne[1];
    const uint32_t n_tok = src1->ne[2];
    const uint32_t n_seq = src1->ne[3];

    bool is_mamba2 = (src3->nb[1] == sizeof(float));
    GGML_ASSERT(is_mamba2);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, dst->op);
    GGML_ASSERT(pipeline != nullptr);

    if (dryrun) {
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        return;
    }

    const int64_t s_off = ggml_nelements(src1) * sizeof(float);

    const vk_op_ssm_scan_push_constants pc = {
        (uint32_t)src0->nb[2], (uint32_t)src0->nb[3],
        (uint32_t)src1->nb[2], (uint32_t)src1->nb[3],
        (uint32_t)src2->nb[1], (uint32_t)src2->nb[2],
        (uint32_t)src3->nb[1],
        (uint32_t)src4->nb[2], (uint32_t)src4->nb[3],
        (uint32_t)src5->nb[2], (uint32_t)src5->nb[3],
        (uint32_t)s_off,
        n_head, head_dim, n_group, n_tok
    };

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src_buf_ctxs[GGML_MAX_SRC];
    for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
        src_buf_ctxs[i] = (ggml_backend_vk_buffer_context *)dst->src[i]->buffer->context;
    }

    vk_buffer d_D = nullptr, d_srcs[GGML_MAX_SRC] = { nullptr };
    size_t dst_offset = 0, src_offsets[GGML_MAX_SRC] = { 0 };
    bool dst_uma = false, srcs_uma[GGML_MAX_SRC] = { false };

    if (ctx->device->uma) {
        for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
            ggml_vk_host_get(ctx->device, dst->src[i]->data, d_srcs[i], src_offsets[i]);
            srcs_uma[i] = d_srcs[i] != nullptr;
        }
        ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset);
        dst_uma = d_D != nullptr;
    }

    if (!dst_uma) {
        d_D = dst_buf_ctx->dev_buffer;
        dst_offset = vk_tensor_offset(dst) + dst->view_offs;
    }
    for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
        if (!srcs_uma[i]) {
            d_srcs[i] = src_buf_ctxs[i]->dev_buffer;
            src_offsets[i] = vk_tensor_offset(dst->src[i]) + dst->src[i]->view_offs;
        }
    }

    size_t dst_size = ggml_nbytes(dst);
    size_t src_sizes[GGML_MAX_SRC];
    for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
        src_sizes[i] = ggml_nbytes(dst->src[i]);
    }

    std::array<uint32_t, 3> elements;

    const int splitH = 16;
    const uint32_t num_workgroups_x = CEIL_DIV(n_head * head_dim, splitH);
    const uint32_t num_workgroups_y = n_seq;
    elements = { num_workgroups_x, num_workgroups_y, 1 };

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
        vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] },
        vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] },
        vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] },
        vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] },
        vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] },
        vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] },
        vk_subbuffer{ d_srcs[6], src_offsets[6], src_sizes[6] },
        vk_subbuffer{ d_D, dst_offset, dst_size }
    }, pc, elements);
}

static void ggml_vk_ssm_conv(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    ggml_vk_op_f32<vk_op_ssm_conv_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SSM_CONV, {
        (uint32_t)src0->nb[1], (uint32_t)src0->nb[2],
        (uint32_t)src1->nb[1],
        (uint32_t)dst->nb[0], (uint32_t)dst->nb[1], (uint32_t)dst->nb[2],
        (uint32_t)src1->ne[0],
        (uint32_t)src0->ne[0],
        (uint32_t)src0->ne[1],
        (uint32_t)dst->ne[1],
        (uint32_t)dst->ne[2],
    }, dryrun);
}
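
// AdamW optimizer step. dst->src holds x (parameters), g (gradients), gm/gv
// (first and second moments) and p, a 7-element tensor of step parameters --
// per ggml convention presumably alpha/beta1/beta2/eps/wd plus the
// bias-correction terms, consumed directly by the shader.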
static void ggml_vk_op_f32_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_push_constants&& pc, bool dryrun = false) {
    const ggml_tensor * x = dst->src[0];
    const ggml_tensor * g = dst->src[1];
    const ggml_tensor * gm = dst->src[2];
    const ggml_tensor * gv = dst->src[3];
    const ggml_tensor * p = dst->src[4];

    GGML_ASSERT(x->type == GGML_TYPE_F32);
    GGML_ASSERT(g->type == GGML_TYPE_F32);
    GGML_ASSERT(gm->type == GGML_TYPE_F32);
    GGML_ASSERT(gv->type == GGML_TYPE_F32);
    GGML_ASSERT(p->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->buffer != nullptr);
    GGML_ASSERT(ggml_is_contiguous(x));
    GGML_ASSERT(ggml_is_contiguous(g));
    GGML_ASSERT(ggml_is_contiguous(gm));
    GGML_ASSERT(ggml_is_contiguous(gv));
    GGML_ASSERT(ggml_is_contiguous(p));
    GGML_ASSERT(ggml_are_same_shape(x, g));
    GGML_ASSERT(ggml_are_same_shape(x, gm));
    GGML_ASSERT(ggml_are_same_shape(x, gv));
    GGML_ASSERT(ggml_nelements(p) == 7);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, g, gm, gv, dst, GGML_OP_OPT_STEP_ADAMW);
    GGML_ASSERT(pipeline != nullptr);

    if (dryrun) {
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        return;
    }

    ggml_backend_vk_buffer_context * x_buf_ctx = (ggml_backend_vk_buffer_context *)x->buffer->context;
    ggml_backend_vk_buffer_context * g_buf_ctx = (ggml_backend_vk_buffer_context *)g->buffer->context;
    ggml_backend_vk_buffer_context * gm_buf_ctx = (ggml_backend_vk_buffer_context *)gm->buffer->context;
    ggml_backend_vk_buffer_context * gv_buf_ctx = (ggml_backend_vk_buffer_context *)gv->buffer->context;
    ggml_backend_vk_buffer_context * p_buf_ctx = (ggml_backend_vk_buffer_context *)p->buffer->context;

    vk_buffer d_X = nullptr, d_G = nullptr, d_GM = nullptr, d_GV = nullptr, d_P = nullptr;
    size_t x_offset = 0, g_offset = 0, gm_offset = 0, gv_offset = 0, p_offset = 0;
    bool X_uma = false, G_uma = false, GM_uma = false, GV_uma = false, P_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, x->data, d_X, x_offset);
        ggml_vk_host_get(ctx->device, g->data, d_G, g_offset);
        ggml_vk_host_get(ctx->device, gm->data, d_GM, gm_offset);
        ggml_vk_host_get(ctx->device, gv->data, d_GV, gv_offset);
        ggml_vk_host_get(ctx->device, p->data, d_P, p_offset);

        X_uma = d_X != nullptr;
        G_uma = d_G != nullptr;
        GM_uma = d_GM != nullptr;
        GV_uma = d_GV != nullptr;
        P_uma = d_P != nullptr;
    }

    if (!X_uma) {
        d_X = x_buf_ctx->dev_buffer;
        x_offset = vk_tensor_offset(x) + x->view_offs;
    }
    if (!G_uma) {
        d_G = g_buf_ctx->dev_buffer;
        g_offset = vk_tensor_offset(g) + g->view_offs;
    }
    if (!GM_uma) {
        d_GM = gm_buf_ctx->dev_buffer;
        gm_offset = vk_tensor_offset(gm) + gm->view_offs;
    }
    if (!GV_uma) {
        d_GV = gv_buf_ctx->dev_buffer;
        gv_offset = vk_tensor_offset(gv) + gv->view_offs;
    }
    if (!P_uma) {
        d_P = p_buf_ctx->dev_buffer;
        p_offset = vk_tensor_offset(p) + p->view_offs;
    }

    const uint64_t x_size = ggml_nbytes(x);
    const uint64_t g_size = ggml_nbytes(g);
    const uint64_t gm_size = ggml_nbytes(gm);
    const uint64_t gv_size = ggml_nbytes(gv);
    const uint64_t p_size = ggml_nbytes(p);

    std::array<uint32_t, 3> elements = { (uint32_t)ggml_nelements(x), 1, 1 };

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
        vk_subbuffer{ d_X, x_offset, x_size },
        vk_subbuffer{ d_G, g_offset, g_size },
        vk_subbuffer{ d_GM, gm_offset, gm_size },
        vk_subbuffer{ d_GV, gv_offset, gv_size },
        vk_subbuffer{ d_P, p_offset, p_size },
    }, pc, elements);
}

static void ggml_vk_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
    const size_t n = ggml_nelements(dst->src[0]);

    ggml_vk_op_f32_opt_step_adamw(
        ctx, subctx, dst,
        { (uint32_t)n, 0, 0.0f, 0.0f },
        dryrun
    );
}

static void ggml_vk_opt_step_sgd(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
    const size_t n = ggml_nelements(dst->src[0]);

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_OPT_STEP_SGD, { (uint32_t)n, 0, 0.0f, 0.0f }, dryrun);
}
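
// CONCAT reuses the binary push-constant layout; the concatenation axis from
// op_params[0] travels in the trailing parameter slot.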
static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    int * op_params = (int *)dst->op_params;

    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONCAT, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, op_params[0],
    }, dryrun);
}
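
// UPSCALE: per-axis scale factors are dst_extent / src_extent. With
// GGML_SCALE_FLAG_ALIGN_CORNERS the endpoints map onto each other, so the
// factor becomes (ne - 1) / (ne_src - 1) and the half-pixel offset is
// dropped: upscaling 4 -> 8 gives sf0 == 2.0 normally, but 7.0 / 3.0 with
// align-corners.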
static void ggml_vk_upscale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t mode = (uint32_t)ggml_get_op_params_i32(dst, 0);

    GGML_TENSOR_UNARY_OP_LOCALS

    float sf0 = (float)ne0 / ne00;
    float sf1 = (float)ne1 / ne01;
    float sf2 = (float)ne2 / ne02;
    float sf3 = (float)ne3 / ne03;
    float pixel_offset = 0.5f;

    if (mode & GGML_SCALE_FLAG_ALIGN_CORNERS) {
        sf0 = ne0 > 1 && ne00 > 1 ? (float)(ne0 - 1) / (ne00 - 1) : sf0;
        sf1 = ne1 > 1 && ne01 > 1 ? (float)(ne1 - 1) / (ne01 - 1) : sf1;
        pixel_offset = 0.0f;
    }

    ggml_vk_op_f32<vk_op_upscale_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UPSCALE, {
        (uint32_t)ggml_nelements(dst), 0, 0,
        (uint32_t)ne00, (uint32_t)ne01,
        (uint32_t)nb00 / src0_type_size, (uint32_t)nb01 / src0_type_size, (uint32_t)nb02 / src0_type_size, (uint32_t)nb03 / src0_type_size,
        (uint32_t)ne0, (uint32_t)ne1, (uint32_t)ne2, (uint32_t)ne3,
        sf0, sf1, sf2, sf3, pixel_offset
    }, dryrun);
}

static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    p.param1 = ggml_get_op_params_f32(dst, 0);
    p.param2 = ggml_get_op_params_f32(dst, 1);

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SCALE, std::move(p), dryrun);
}

static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SQR, vk_op_unary_push_constants_init(src0, dst), dryrun);
}

static void ggml_vk_sqrt(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst), dryrun);
}

static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst), dryrun);
}

static void ggml_vk_cos(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_COS, vk_op_unary_push_constants_init(src0, dst), dryrun);
}

static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    p.param1 = ggml_get_op_params_f32(dst, 0);
    p.param2 = ggml_get_op_params_f32(dst, 1);

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_CLAMP, std::move(p), dryrun);
}

static void ggml_vk_pad(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_pad_push_constants p = vk_op_pad_push_constants_init(src0, dst);
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_PAD, std::move(p), dryrun);
}
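
// ROLL: the four int32 shifts are biased by 0x8000 and packed pairwise into
// two uint32 values that travel through the float param1/param2 slots via
// memcpy. E.g. s0 == -1, s1 == 2 packs to (0x7fff << 16) | 0x8002; the shader
// presumably subtracts the bias back out.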
static void ggml_vk_roll(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const int32_t s0 = ggml_get_op_params_i32(dst, 0);
    const int32_t s1 = ggml_get_op_params_i32(dst, 1);
    const int32_t s2 = ggml_get_op_params_i32(dst, 2);
    const int32_t s3 = ggml_get_op_params_i32(dst, 3);
    const uint32_t s01_packed = ((s0 + 0x8000) << 16) | (s1 + 0x8000);
    const uint32_t s23_packed = ((s2 + 0x8000) << 16) | (s3 + 0x8000);

    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    memcpy(&p.param1, &s01_packed, sizeof(float));
    memcpy(&p.param2, &s23_packed, sizeof(float));

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ROLL, std::move(p), dryrun);
}

static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst, ggml_nelements(dst));
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_REPEAT, std::move(p), dryrun);
}

static void ggml_vk_repeat_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst, ggml_nelements(dst));
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_REPEAT_BACK, std::move(p), dryrun);
}
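
// CPY between two quantized types copies raw block data, so ne is converted
// from logical elements to 4-byte (or, for odd block sizes, 2-byte) units:
// e.g. Q8_0 (32 elements per 34-byte block) maps ne elements to
// ne / 32 * 17 two-byte units.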
static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    uint32_t ne = (uint32_t)ggml_nelements(src0);
    if (ggml_is_quantized(src0->type) && ggml_is_quantized(dst->type)) {
        // Convert from number of logical elements to 2- or 4-byte units.
        ne /= ggml_blck_size(src0->type);
        if ((ggml_type_size(src0->type) % 4) == 0) {
            ne *= ggml_type_size(src0->type) / 4;
        } else {
            ne *= ggml_type_size(src0->type) / 2;
        }
    }

    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst, ne);
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_CPY, std::move(p), dryrun);
}

static void ggml_vk_set_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    // Skip empty set_rows operations. For most ops the empty check at the start
    // of ggml_vk_build_graph is sufficient, but set_rows can have a nonempty dst
    // with empty srcs.
    if (ggml_is_empty(src0) || ggml_is_empty(src1)) {
        return;
    }
    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SET_ROWS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}

static void ggml_vk_silu_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SILU_BACK, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun);
}

static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
}

static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const int * int_op_params = (const int *)dst->op_params;
    const float * float_op_params = (const float *)dst->op_params;

    const uint32_t num_groups = int_op_params[0];
    const float eps = float_op_params[1];
    const uint32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f }, dryrun);
}

static uint32_t ggml_vk_rms_num_partials(ggml_backend_vk_context * ctx, const ggml_tensor *node) {
    const uint32_t ne = (uint32_t)node->ne[0];
    const uint32_t denom = ctx->device->pipeline_add_rms[0][0][0]->wg_denoms[0];
    const uint32_t num_partials = CEIL_DIV(ne, denom);
    return num_partials;
}

static uint32_t ggml_vk_rms_partials_size(ggml_backend_vk_context * ctx, const ggml_tensor *node) {
    const uint32_t num_partials = ggml_vk_rms_num_partials(ctx, node);
    const uint32_t num_bytes = ROUNDUP_POW2(num_partials * sizeof(uint32_t), ctx->device->partials_binding_alignment);
    return num_bytes;
}
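
// RMS_NORM: when a preceding fused ADD wrote partial sums
// (ctx->do_add_rms_partials), param3 carries the number of partials so the
// shader can reduce them instead of re-reading its input; afterwards the
// partials-buffer cursor is advanced past this tensor's slice and the flag
// cleared for the next fusion.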
static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, float * op_params, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    uint32_t param3 = ctx->do_add_rms_partials ? ggml_vk_rms_num_partials(ctx, dst) : 0;

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_RMS_NORM, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], 0.0f, (int32_t)param3,
    }, dryrun);

    if (ctx->do_add_rms_partials) {
        ctx->prealloc_size_add_rms_partials_offset += ggml_vk_rms_partials_size(ctx, src0);
        ctx->do_add_rms_partials = false;
    }
}

static void ggml_vk_rms_norm_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_RMS_NORM_BACK, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
}

static void ggml_vk_l2_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_L2_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
}

static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun);
}

static void ggml_vk_glu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const float * op_params_f = (const float *)dst->op_params;

    const bool swapped = (bool)dst->op_params[1];
    const bool split = src1 != nullptr;
    const float alpha = op_params_f[2];
    const float limit = op_params_f[3];

    GGML_ASSERT(ggml_is_contiguous(src0));

    if (!split) {
        GGML_ASSERT(src0->ne[0] / 2 == dst->ne[0]);
    } else {
        GGML_ASSERT(src0->ne[0] == src1->ne[0]);
        GGML_ASSERT(src0->ne[0] == dst->ne[0]);
        GGML_ASSERT(src0->type == src1->type);
    }

    const uint32_t mode = split ? 2 : (swapped ? 1 : 0);

    ggml_vk_op_f32<vk_op_glu_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_GLU,
        {
            (uint32_t)ggml_nelements(dst),
            (uint32_t)src0->ne[0],
            (uint32_t)dst->ne[0],
            mode,
            alpha,
            limit
        }, dryrun);
}

static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    int32_t * op_params = (int32_t *)dst->op_params;
    ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] }, dryrun);
}

static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;

    float scale = op_params[0];
    float max_bias = op_params[1];

    const uint32_t ncols = (uint32_t)src0->ne[0];
    const uint32_t nrows_x = (uint32_t)ggml_nrows(src0);
    const uint32_t nrows_y = (uint32_t)src0->ne[1];

    const uint32_t ne12 = src1 ? (uint32_t)(src1->ne[2]) : 0u;
    const uint32_t ne13 = src1 ? (uint32_t)(src1->ne[3]) : 0u;
    const uint32_t nb11 = src1 ? (uint32_t)(src1->nb[1] / src1->nb[0]) : 0u;
    const uint32_t nb12 = src1 ? (uint32_t)(src1->nb[2] / src1->nb[0]) : 0u;
    const uint32_t nb13 = src1 ? (uint32_t)(src1->nb[3] / src1->nb[0]) : 0u;

    const uint32_t n_head_kv   = src0->ne[2];
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_SOFT_MAX, {
        ncols,
        src1 != nullptr ? nrows_y : (uint32_t)0,
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],
        ne12, ne13,
        nb11, nb12, nb13,
        scale, max_bias,
        m0, m1,
        n_head_log2,
        nrows_x,
        src2 != nullptr
    }, dryrun);
}

static void ggml_vk_soft_max_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SOFT_MAX_BACK, { (uint32_t)src0->ne[0], (uint32_t)ggml_nrows(src0), op_params[0], op_params[1] }, dryrun);
}
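
// Fused top-k MoE routing (softmax + top-k selection, optionally with weight
// normalization). The weights and ids tensors sit at mode-dependent offsets
// from node_idx, matching the fusion patterns recognized elsewhere; e.g. in
// the EARLY_SOFTMAX_NORM variant the clamp node at node_idx + 7 supplies
// clamp_min/clamp_max.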
static void ggml_vk_topk_moe(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_cgraph * cgraph, int node_idx, bool dryrun = false) {
    topk_moe_mode mode = ggml_vk_num_additional_ops_to_topk_moe_mode(ctx->num_additional_fused_ops);

    ggml_tensor * logits = cgraph->nodes[node_idx + 0]->src[0];
    ggml_tensor * weights = (mode == TOPK_MOE_EARLY_SOFTMAX_NORM) ? cgraph->nodes[node_idx + 9] :
                            (mode == TOPK_MOE_EARLY_SOFTMAX)      ? cgraph->nodes[node_idx + 4] :
                                                                    cgraph->nodes[node_idx + 5];
    ggml_tensor * ids = (mode == TOPK_MOE_LATE_SOFTMAX) ? cgraph->nodes[node_idx + 1] : cgraph->nodes[node_idx + 3];

    GGML_ASSERT(logits->type == GGML_TYPE_F32);
    GGML_ASSERT(weights->type == GGML_TYPE_F32);
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const int n_experts = logits->ne[0];
    const int n_rows = logits->ne[1];
    const int n_expert_used = weights->ne[1];

    GGML_ASSERT(ids->nb[1] / ggml_type_size(ids->type) == (size_t) n_experts);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, cgraph->nodes[node_idx], GGML_OP_SOFT_MAX);

    if (dryrun) {
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        return;
    }

    ggml_backend_vk_buffer_context * logits_buf_ctx = (ggml_backend_vk_buffer_context *)logits->buffer->context;
    ggml_backend_vk_buffer_context * weights_buf_ctx = (ggml_backend_vk_buffer_context *)weights->buffer->context;
    ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;

    vk_buffer d_logits = nullptr;
    size_t logits_buf_offset = 0;
    vk_buffer d_weights = nullptr;
    size_t weights_buf_offset = 0;
    vk_buffer d_ids = nullptr;
    size_t ids_buf_offset = 0;

    bool logits_uma = false;
    bool weights_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, logits->data, d_logits, logits_buf_offset);
        ggml_vk_host_get(ctx->device, weights->data, d_weights, weights_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        logits_uma = d_logits != nullptr;
        weights_uma = d_weights != nullptr;
        ids_uma = d_ids != nullptr;
    }

    if (!logits_uma) {
        d_logits = logits_buf_ctx->dev_buffer;
        logits_buf_offset = vk_tensor_offset(logits) + logits->view_offs;
        GGML_ASSERT(d_logits != nullptr);
    }
    if (!weights_uma) {
        d_weights = weights_buf_ctx->dev_buffer;
        weights_buf_offset = vk_tensor_offset(weights) + weights->view_offs;
        GGML_ASSERT(d_weights != nullptr);
    }
    if (!ids_uma) {
        d_ids = ids_buf_ctx->dev_buffer;
        ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }

    vk_op_topk_moe_push_constants pc {};
    pc.n_rows = n_rows;
    pc.n_expert_used = n_expert_used;
    if (mode == TOPK_MOE_EARLY_SOFTMAX_NORM) {
        ggml_tensor * clamp = cgraph->nodes[node_idx + 7];
        pc.clamp_min = ggml_get_op_params_f32(clamp, 0);
        pc.clamp_max = ggml_get_op_params_f32(clamp, 1);
    }

    GGML_ASSERT(n_expert_used <= n_experts);

    const uint32_t rows_per_block = 4;
    std::array<uint32_t, 3> elements = { CEIL_DIV(n_rows, rows_per_block), 1, 1 };

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
        {
            ggml_vk_subbuffer(ctx, d_logits, logits_buf_offset),
            ggml_vk_subbuffer(ctx, d_weights, weights_buf_offset),
            ggml_vk_subbuffer(ctx, d_ids, ids_buf_offset),
        }, pc, elements);
}
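
// ROPE: scalar parameters (n_dims, mode, n_ctx_orig, freq/beta factors) are
// unpacked from dst->op_params. When fused as rope + view + set_rows, dst is
// redirected to the set_rows node two entries ahead, src3 becomes its row
// index tensor, and set_rows_stride carries the destination row stride in
// elements (0 means no fusion).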
static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_cgraph * cgraph, int node_idx, bool backprop, bool dryrun = false) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    const ggml_tensor * src2 = dst->src[2];
    const ggml_tensor * src3 = nullptr;

    const int n_dims        = ((int32_t *) dst->op_params)[1];
    const int mode          = ((int32_t *) dst->op_params)[2];
    // const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_ctx_orig    = ((int32_t *) dst->op_params)[4];
    const float freq_base   = ((float *) dst->op_params)[5];
    const float freq_scale  = ((float *) dst->op_params)[6];
    const float ext_factor  = ((float *) dst->op_params)[7];
    const float attn_factor = ((float *) dst->op_params)[8];
    const float beta_fast   = ((float *) dst->op_params)[9];
    const float beta_slow   = ((float *) dst->op_params)[10];
    int sections[4] {};
    if (mode & GGML_ROPE_TYPE_MROPE) {
        memcpy(sections, (int32_t *) dst->op_params + 11, sizeof(int)*4);
    }
    const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE;

    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    uint32_t s1 = src0->nb[1] / ggml_type_size(src0->type);
    uint32_t s2 = src0->nb[2] / ggml_type_size(src0->type);

    uint32_t set_rows_stride = 0;
    // Fused rope + view + set_rows passes the set_rows destination stride in set_rows_stride
    // and overrides the dst and sets src3=row_indices
    if (ctx->num_additional_fused_ops > 0) {
        set_rows_stride = cgraph->nodes[node_idx + 2]->nb[1] / ggml_type_size(cgraph->nodes[node_idx + 2]->type);
        src3 = cgraph->nodes[node_idx + 2]->src[1];
        dst = cgraph->nodes[node_idx + 2];
    }

    ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, src2, src3, dst, GGML_OP_ROPE, {
        (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1],
        freq_base, ext_factor, attn_factor, {corr_dims[0], corr_dims[1]}, theta_scale,
        src2 != nullptr, (uint32_t)src0->ne[2], s1, s2,
        { sections[0], sections[1], sections[2], sections[3] }, is_imrope, backprop, set_rows_stride,
    }, dryrun);
}

static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    int32_t * op_params = (int32_t *)dst->op_params;

    uint32_t ncols = src0->ne[0];
    uint32_t nrows = ggml_nrows(src0);

    ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
        ncols,
        nrows,
        op_params[0],
    }, dryrun);
}

static void ggml_vk_sum(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, ggml_nelements(src0));
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SUM, p, dryrun);
}

static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, p, dryrun);
}

static void ggml_vk_mean(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
    p.weight = 1.0f / (float)src0->ne[0];
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_MEAN, p, dryrun);
}

static void ggml_vk_argmax(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ARGMAX, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], 0.0f, 0.0f }, dryrun);
}

static void ggml_vk_count_equal(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_COUNT_EQUAL, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun);
}

static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const int32_t s0 = dst->op_params[0];
    const int32_t s1 = dst->op_params[1];
    const int32_t p0 = dst->op_params[2];
    const int32_t p1 = dst->op_params[3];
    const int32_t d0 = dst->op_params[4];
    const int32_t d1 = dst->op_params[5];

    const bool is_2D = dst->op_params[6] == 1;

    const uint32_t IC = src1->ne[is_2D ? 2 : 1];
    const uint32_t IH = is_2D ? src1->ne[1] : 1;
    const uint32_t IW =         src1->ne[0];

    const uint32_t KH = is_2D ? src0->ne[1] : 1;
    const uint32_t KW =         src0->ne[0];

    const uint32_t OH = is_2D ? dst->ne[2] : 1;
    const uint32_t OW =         dst->ne[1];

    const uint32_t offset_delta = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
    const uint32_t batch_offset = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32

    const uint32_t pelements = OW * KW * KH;

    const ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    const vk_buffer d_buf = d_buf_ctx->dev_buffer;
    const vk::DeviceAddress dst_addr = d_buf->bda_addr + vk_tensor_offset(dst) + dst->view_offs;

    ggml_vk_op_f32<vk_op_im2col_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_IM2COL, {
        dst_addr,
        batch_offset, offset_delta,
        IC, IW, IH, OW, OH, KW, KH,
        pelements,
        IC * KH * KW,
        s0, s1, p0, p1, d0, d1,
    }, dryrun);
}
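
// IM2COL_3D: products such as KD_KH_KW and OW_IC_KD_KH_KW are precomputed on
// the host, presumably so the shader can decompose a flat output index with a
// few divisions and modulos instead of recomputing the products per
// invocation.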
static void ggml_vk_im2col_3d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    GGML_TENSOR_BINARY_OP_LOCALS

    const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t s2 = ((const int32_t *)(dst->op_params))[2];
    const int32_t p0 = ((const int32_t *)(dst->op_params))[3];
    const int32_t p1 = ((const int32_t *)(dst->op_params))[4];
    const int32_t p2 = ((const int32_t *)(dst->op_params))[5];
    const int32_t d0 = ((const int32_t *)(dst->op_params))[6];
    const int32_t d1 = ((const int32_t *)(dst->op_params))[7];
    const int32_t d2 = ((const int32_t *)(dst->op_params))[8];
    const int32_t IC = ((const int32_t *)(dst->op_params))[9];

    const int64_t N  = ne13 / IC;
    const int64_t ID = ne12;
    const int64_t IH = ne11;
    const int64_t IW = ne10;

    const int64_t KD = ne02;
    const int64_t KH = ne01;
    const int64_t KW = ne00;

    const int64_t OD = ne3 / N;
    const int64_t OH = ne2;
    const int64_t OW = ne1;

    const ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    const vk_buffer d_buf = d_buf_ctx->dev_buffer;
    const vk::DeviceAddress dst_addr = d_buf->bda_addr + vk_tensor_offset(dst) + dst->view_offs;

    vk_op_im2col_3d_push_constants pc {};

    pc.dst_addr = dst_addr;
    pc.nb10 = nb10 / ggml_type_size(src1->type);
    pc.nb11 = nb11 / ggml_type_size(src1->type);
    pc.nb12 = nb12 / ggml_type_size(src1->type);
    pc.nb13 = nb13 / ggml_type_size(src1->type);
    pc.s0 = s0;
    pc.s1 = s1;
    pc.s2 = s2;
    pc.p0 = p0;
    pc.p1 = p1;
    pc.p2 = p2;
    pc.d0 = d0;
    pc.d1 = d1;
    pc.d2 = d2;
    pc.IW = IW;
    pc.IH = IH;
    pc.ID = ID;
    pc.IC = IC;
    pc.KW = KW;
    pc.OH = OH;
    pc.KD_KH_KW = KD*KH*KW;
    pc.KH_KW = KH*KW;
    pc.IC_KD_KH_KW = IC*KD*KH*KW;
    pc.N_OD_OH = N*OD*OH;
    pc.OD_OH = OD*OH;
    pc.OD_OH_OW_IC_KD_KH_KW = OD*OH*OW*IC*KD*KH*KW;
    pc.OH_OW_IC_KD_KH_KW = OH*OW*IC*KD*KH*KW;
    pc.OW_IC_KD_KH_KW = OW*IC*KD*KH*KW;

    ggml_vk_op_f32<vk_op_im2col_3d_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_IM2COL_3D, std::move(pc), dryrun);
}

static void ggml_vk_timestep_embedding(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t dim = dst->op_params[0];
    const uint32_t max_period = dst->op_params[1];
    const uint32_t nb1 = dst->nb[1] / ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_timestep_embedding_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_TIMESTEP_EMBEDDING, {
        nb1, dim, max_period,
    }, dryrun);
}

static void ggml_vk_conv_transpose_1d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    // src0: (K, Cout, Cin, 1) -- kernel
    // src1: (L, Cin, 1, 1) -- input
    // dst: (*, Cout, 1, 1)

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    const int32_t s0 = dst->op_params[0];

    vk_op_conv_transpose_1d_push_constants p{};
    p.Cout = static_cast<uint32_t>(ne01);
    p.Cin  = static_cast<uint32_t>(ne02);
    p.K    = static_cast<uint32_t>(ne00);
    p.L    = static_cast<uint32_t>(ne10);
    p.KL   = static_cast<uint32_t>(ne0);
    p.nb01 = static_cast<uint32_t>(nb01 / nb00);
    p.nb02 = static_cast<uint32_t>(nb02 / nb00);
    p.nb11 = static_cast<uint32_t>(nb11 / nb10);
    p.nb1  = static_cast<uint32_t>(nb1 / nb0);
    p.s0   = static_cast<uint32_t>(s0);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_TRANSPOSE_1D, std::move(p), dryrun);
}

static void ggml_vk_pool_2d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    uint32_t op = static_cast<uint32_t>(dst->op_params[0]);
    const int32_t k1 = dst->op_params[1];
    const int32_t k0 = dst->op_params[2];
    const int32_t s1 = dst->op_params[3];
    const int32_t s0 = dst->op_params[4];
    const int32_t p1 = dst->op_params[5];
    const int32_t p0 = dst->op_params[6];

    const uint32_t IH = src0->ne[1];
    const uint32_t IW = src0->ne[0];

    const uint32_t N = dst->ne[3];

    const uint32_t OC = dst->ne[2];
    const uint32_t OH = dst->ne[1];
    const uint32_t OW = dst->ne[0];

    const uint32_t parallel_elements = N * OC * OH * OW;

    ggml_vk_op_f32<vk_op_pool2d_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_POOL_2D, {
        IW, IH, OW, OH, OC,
        parallel_elements,
        op,
        k0, k1, s0, s1, p0, p1,
    }, dryrun);
}
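
// Direct CONV_2D: byte strides are normalized by each tensor's own element
// size (nb00 for the kernel, nb10 for the input, nb0 for the output), letting
// f16 and f32 kernels share one push-constant layout; the ne03 == ne2 and
// ne02 == ne12 asserts check Cout/Cin consistency across kernel, input and
// output.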
  8938. static void ggml_vk_conv_2d(ggml_backend_vk_context * ctx, vk_context & subctx, const ggml_tensor * src0,
  8939. const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
  8940. GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
  8941. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  8942. GGML_ASSERT(dst->type == GGML_TYPE_F32);
  8943. GGML_TENSOR_BINARY_OP_LOCALS
  8944. GGML_ASSERT(nb00 == sizeof(float) || nb00 == sizeof(ggml_fp16_t));
  8945. GGML_ASSERT(nb10 == sizeof(float));
  8946. GGML_ASSERT(nb0 == sizeof(float));
  8947. vk_op_conv2d_push_constants p{};
  8948. p.Cout = static_cast<uint32_t>(ne03);
  8949. p.Cin = static_cast<uint32_t>(ne02);
  8950. p.N = static_cast<uint32_t>(ne13);
  8951. p.KW = static_cast<uint32_t>(ne00);
  8952. p.KH = static_cast<uint32_t>(ne01);
  8953. p.W = static_cast<uint32_t>(ne10);
  8954. p.H = static_cast<uint32_t>(ne11);
  8955. p.OW = static_cast<uint32_t>(ne0);
  8956. p.OH = static_cast<uint32_t>(ne1);
  8957. p.s0 = static_cast<uint32_t>(dst->op_params[0]);
  8958. p.s1 = static_cast<uint32_t>(dst->op_params[1]);
  8959. p.p0 = static_cast<uint32_t>(dst->op_params[2]);
  8960. p.p1 = static_cast<uint32_t>(dst->op_params[3]);
  8961. p.d0 = static_cast<uint32_t>(dst->op_params[4]);
  8962. p.d1 = static_cast<uint32_t>(dst->op_params[5]);
  8963. p.nb01 = static_cast<uint32_t>(nb01 / nb00);
  8964. p.nb02 = static_cast<uint32_t>(nb02 / nb00);
  8965. p.nb03 = static_cast<uint32_t>(nb03 / nb00);
  8966. p.nb11 = static_cast<uint32_t>(nb11 / nb10);
  8967. p.nb12 = static_cast<uint32_t>(nb12 / nb10);
  8968. p.nb13 = static_cast<uint32_t>(nb13 / nb10);
  8969. p.nb1 = static_cast<uint32_t>(nb1 / nb0);
  8970. p.nb2 = static_cast<uint32_t>(nb2 / nb0);
  8971. p.nb3 = static_cast<uint32_t>(nb3 / nb0);
  8972. GGML_ASSERT(ne03 == ne2);
  8973. GGML_ASSERT(ne02 == ne12);
  8974. ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_2D, std::move(p), dryrun);
  8975. }
static void ggml_vk_conv_transpose_2d(ggml_backend_vk_context * ctx, vk_context & subctx, const ggml_tensor * src0,
                                      const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(nb00 == sizeof(float) || nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));
    GGML_ASSERT(nb0 == sizeof(float));

    vk_op_conv_transpose_2d_push_constants p{};
    p.Cout = static_cast<uint32_t>(ne02);
    p.Cin  = static_cast<uint32_t>(ne03);
    p.N    = static_cast<uint32_t>(ne13);

    p.KW = static_cast<uint32_t>(ne00);
    p.KH = static_cast<uint32_t>(ne01);
    p.W  = static_cast<uint32_t>(ne10);
    p.H  = static_cast<uint32_t>(ne11);
    p.OW = static_cast<uint32_t>(ne0);
    p.OH = static_cast<uint32_t>(ne1);

    p.s0 = static_cast<uint32_t>(dst->op_params[0]);
    p.s1 = static_cast<uint32_t>(dst->op_params[0]);
    p.p0 = 0;
    p.p1 = 0;
    p.d0 = 1;
    p.d1 = 1;

    p.nb01 = static_cast<uint32_t>(nb01 / nb00);
    p.nb02 = static_cast<uint32_t>(nb02 / nb00);
    p.nb03 = static_cast<uint32_t>(nb03 / nb00);

    p.nb11 = static_cast<uint32_t>(nb11 / nb10);
    p.nb12 = static_cast<uint32_t>(nb12 / nb10);
    p.nb13 = static_cast<uint32_t>(nb13 / nb10);

    p.nb1 = static_cast<uint32_t>(nb1 / nb0);
    p.nb2 = static_cast<uint32_t>(nb2 / nb0);
    p.nb3 = static_cast<uint32_t>(nb3 / nb0);

    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne12);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_TRANSPOSE_2D, std::move(p), dryrun);
}
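
// Depthwise 2D convolution: one kernel slice per channel. The asserts below
// pin the expected shapes: the kernel's ne[3] must equal the number of
// channels and src1's ne[3] the number of batches.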
static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    vk_op_conv2d_dw_push_constants p{};
    p.ne = ggml_nelements(dst);
    p.channels = dst->ne[2];
    p.batches = dst->ne[3];
    p.dst_w = dst->ne[0];
    p.dst_h = dst->ne[1];
    p.src_w = src1->ne[0];
    p.src_h = src1->ne[1];
    p.knl_w = src0->ne[0];
    p.knl_h = src0->ne[1];
    p.stride_x = dst->op_params[0];
    p.stride_y = dst->op_params[1];
    p.pad_x = dst->op_params[2];
    p.pad_y = dst->op_params[3];
    p.dilation_x = dst->op_params[4];
    p.dilation_y = dst->op_params[5];

    GGML_ASSERT(src0->ne[3] == p.channels);
    GGML_ASSERT(src1->ne[3] == p.batches);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_2D_DW, std::move(p), dryrun);
}

static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const float * op_params = (const float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f }, dryrun);
}

#ifdef GGML_VULKAN_RUN_TESTS
static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
    if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
                float val;
                if (type == GGML_TYPE_F32) {
                    val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
                } else if (type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
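
// Self-test: run the matmul pipeline num_it times on random data, time the
// submission, then verify against a CPU reference computed with ggml_mul_mat.
// shader_size selects the S/M/L pipeline variant; when k is not aligned to
// the pipeline's alignment, the unaligned variant is substituted.
// Example invocation (matching the loop in ggml_vk_preallocate_buffers below):
//   ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, 4096, 512, 4096, 2, 100, 1, 0);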
template <typename X_TYPE, typename Y_TYPE>
static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
    VK_LOG_DEBUG("ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_s;
            shname = "F32_ALIGNED_S";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_s;
            shname = "F32_F16_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_s;
            shname = "F16_F32_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16.f32acc->a_s;
            shname = "F16_ALIGNED_S";
        } else {
            GGML_ABORT("fatal error");
        }
    } else if (shader_size == 1) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_m;
            shname = "F32_ALIGNED_M";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_m;
            shname = "F32_F16_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_m;
            shname = "F16_F32_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16.f32acc->a_m;
            shname = "F16_ALIGNED_M";
        } else {
            GGML_ABORT("fatal error");
        }
    } else if (shader_size == 2) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_l;
            shname = "F32_ALIGNED_L";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_l;
            shname = "F32_F16_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_l;
            shname = "F16_F32_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16.f32acc->a_l;
            shname = "F16_ALIGNED_L";
        } else {
            GGML_ABORT("fatal error");
        }
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->s;
                shname = "F32_S";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->s;
                shname = "F32_F16_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32.f32acc->s;
                shname = "F16_F32_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16.f32acc->s;
                shname = "F16_S";
            }
        } else if (shader_size == 1) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->m;
                shname = "F32_M";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->m;
                shname = "F32_F16_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32.f32acc->m;
                shname = "F16_F32_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16.f32acc->m;
                shname = "F16_M";
            }
        } else if (shader_size == 2) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->l;
                shname = "F32_L";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->l;
                shname = "F32_F16_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32.f32acc->l;
                shname = "F16_F32_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16.f32acc->l;
                shname = "F16_L";
            }
        }
    }

    ggml_pipeline_request_descriptor_sets(ctx, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, {vk::MemoryPropertyFlagBits::eDeviceLocal});
        }
    }
    if (ctx->device->need_compiles) {
        ggml_vk_load_shaders(ctx->device);
    }

    ggml_pipeline_allocate_descriptor_sets(ctx);

    vk_buffer d_X = ggml_vk_create_buffer_check(ctx->device, sizeof(X_TYPE) * x_ne, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer d_Y = ggml_vk_create_buffer_check(ctx->device, sizeof(Y_TYPE) * y_ne, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer d_D = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne, {vk::MemoryPropertyFlagBits::eDeviceLocal});

    X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
    Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
    float* d = (float *) malloc(sizeof(float) * d_ne);

    for (size_t i = 0; i < x_ne; i++) {
        if (std::is_same<float, X_TYPE>()) {
            x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            // x[i] = 1.0f;
            // x[i] = i + 1;
            // x[i] = (i % k == i / k) ? 1.0f : 0.0f;
        } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
            x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            // x[i] = ggml_fp32_to_fp16(1.0f);
            // x[i] = ggml_fp32_to_fp16(i + 1);
            // x[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
        } else {
            GGML_ABORT("fatal error");
        }
    }
    for (size_t i = 0; i < y_ne; i++) {
        if (std::is_same<float, Y_TYPE>()) {
            y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            // y[i] = (i % k == i / k) ? 1.0f : 0.0f;
            // y[i] = i + 1;
        } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
            y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            // y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
            // y[i] = ggml_fp32_to_fp16(i + 1);
        } else {
            GGML_ABORT("fatal error");
        }
    }

    ggml_vk_buffer_write(d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
    ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
    ggml_vk_ctx_begin(ctx->device, subctx);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(ctx, d_X), ggml_vk_subbuffer(ctx, d_Y), ggml_vk_subbuffer(ctx, d_D), ggml_vk_subbuffer(ctx, ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1, n
        );
    }
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
    ggml_vk_queue_command_pools_cleanup(ctx->device);

    auto end = std::chrono::high_resolution_clock::now();
    double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    // copy dst to host
    ggml_vk_buffer_read(d_D, 0, d, sizeof(float) * d_ne);

    float * d_chk = (float *) malloc(sizeof(float) * d_ne);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_type src0_type;
    ggml_type src1_type;

    if (std::is_same<float, X_TYPE>()) {
        src0_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
        src0_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }
    if (std::is_same<float, Y_TYPE>()) {
        src1_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
        src1_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = x;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;
        if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }

    avg_err /= m * n;

    double tflops = 2.0*m*n*k*batch*num_it / (time / 1000.0) / (1000.0*1000.0*1000.0*1000.0);

    std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1 || std::isnan(avg_err)) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    free(d_chk);

    ggml_vk_command_pool_cleanup(ctx->device, ctx->compute_cmd_pool);
    ggml_vk_command_pool_cleanup(ctx->device, ctx->transfer_cmd_pool);

    ggml_vk_destroy_buffer(d_X);
    ggml_vk_destroy_buffer(d_Y);
    ggml_vk_destroy_buffer(d_D);

    free(x);
    free(y);
    free(d);
}
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}

static void ggml_vk_quantize_data(const float * from, void * to, size_t ne, ggml_type quant) {
    ggml_quantize_chunk(quant, from, to, 0, 1, ne, nullptr);
}

static void ggml_vk_dequantize_data(const void * from, float * to, size_t ne, ggml_type quant) {
    if (quant == GGML_TYPE_F32) {
        memcpy(to, from, sizeof(float) * ne);
        return;
    }

    const auto * tt = ggml_get_type_traits(quant);

    ggml_to_float_t dequant_fn = tt->to_float;

    dequant_fn(from, to, ne);
}
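
// Self-test: quantize random data on the CPU, dequantize it back to FP16 on
// the GPU with the to_fp16 pipeline, and compare against the CPU
// dequantization (the type traits' to_float) as the reference.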
static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant(" << ne << ")");
    const size_t x_sz = sizeof(float) * ne;
    const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
    const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
    float * x = (float *) malloc(x_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz_f16, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    float * x_ref = (float *) malloc(x_sz);
    ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);

    for (size_t i = 0; i < ne; i++) {
        x[i] = rand() / (float)RAND_MAX;
    }

    vk_pipeline p = ggml_vk_get_to_fp16(ctx, quant);

    ggml_vk_quantize_data(x, qx, ne, quant);
    ggml_vk_dequantize_data(qx, x_ref, ne, quant);

    ggml_pipeline_request_descriptor_sets(ctx, p, 1);

    if (ctx->device->need_compiles) {
        ggml_vk_load_shaders(ctx->device);
    }

    ggml_pipeline_allocate_descriptor_sets(ctx);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
    ggml_vk_ctx_begin(ctx->device, subctx);
    const std::vector<uint32_t> pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne };
    ggml_vk_dispatch_pipeline(ctx, subctx, p, { vk_subbuffer{ qx_buf, 0, qx_sz }, vk_subbuffer{ x_buf, 0, x_sz_f16 } }, pc, { (uint32_t)ne, 1, 1});
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
    ggml_vk_queue_command_pools_cleanup(ctx->device);

    auto end = std::chrono::high_resolution_clock::now();

    double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(x_buf, 0, x_chk, x_sz_f16);

    int first_err = -1;

    double avg_err = 0.0;
    for (size_t i = 0; i < ne; i++) {
        double error = std::fabs(x_ref[i] - ggml_fp16_to_fp32(x_chk[i]));
        avg_err += error;

        if (first_err < 0 && error > 0.05) {
            first_err = i;
        }
    }

    avg_err /= ne;

    std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "first_error = " << first_err << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << ggml_fp16_to_fp32(x_chk[i]) << ", ";
        }
        std::cerr << std::endl << "Expected result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << x_ref[i] << ", ";
        }
        std::cerr << std::endl;
    }

    ggml_vk_destroy_buffer(x_buf);
    ggml_vk_destroy_buffer(qx_buf);

    free(x);
    free(qx);
    free(x_ref);
    free(x_chk);
}
// This does not work without ggml q8_1 quantization support
//
// typedef uint16_t ggml_half;
// typedef uint32_t ggml_half2;
//
// #define QK8_1 32
// typedef struct {
//     union {
//         struct {
//             ggml_half d; // delta
//             ggml_half s; // d * sum(qs[i])
//         } GGML_COMMON_AGGR_S;
//         ggml_half2 ds;
//     } GGML_COMMON_AGGR_U;
//     int8_t qs[QK8_1]; // quants
// } block_q8_1;
//
// static void ggml_vk_test_quantize(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
//     VK_LOG_DEBUG("ggml_vk_test_quantize(" << ne << ")");
//     GGML_ASSERT(quant == GGML_TYPE_Q8_1);
//
//     const size_t x_sz = sizeof(float) * ne;
//     const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
//     float * x = (float *) malloc(x_sz);
//     block_q8_1 * qx = (block_q8_1 *)malloc(qx_sz);
//     block_q8_1 * qx_res = (block_q8_1 *)malloc(qx_sz);
//     vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
//     vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
//
//     for (size_t i = 0; i < ne; i++) {
//         x[i] = rand() / (float)RAND_MAX;
//     }
//
//     vk_pipeline p = ggml_vk_get_quantize_pipeline(ctx, quant);
//
//     ggml_pipeline_request_descriptor_sets(ctx, p, 1);
//
//     if (ctx->device->need_compiles) {
//         ggml_vk_load_shaders(ctx->device);
//     }
//
//     ggml_pipeline_allocate_descriptor_sets(ctx);
//
//     ggml_vk_buffer_write(x_buf, 0, x, x_sz);
//
//     vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
//     ggml_vk_ctx_begin(ctx->device, subctx);
//     ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, x_buf), ggml_vk_subbuffer(ctx, qx_buf), ne);
//     ggml_vk_ctx_end(subctx);
//
//     auto begin = std::chrono::high_resolution_clock::now();
//
//     ggml_vk_submit(subctx, ctx->fence);
//     VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_quantize waitForFences");
//     ctx->device->device.resetFences({ ctx->fence });
//     ggml_vk_queue_command_pools_cleanup(ctx->device);
//
//     auto end = std::chrono::high_resolution_clock::now();
//
//     double ms_quant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
//     ggml_vk_buffer_read(qx_buf, 0, qx, qx_sz);
//
//     ggml_vk_quantize_data(x, qx_res, ne, quant);
//
//     int first_err = -1;
//
//     for (size_t i = 0; i < ne / 32; i++) {
//         double error = std::fabs(ggml_fp16_to_fp32(qx_res[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d) - ggml_fp16_to_fp32(qx[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d));
//
//         if (first_err < 0 && error > 0.1) {
//             first_err = i;
//         }
//
//         error = std::fabs(ggml_fp16_to_fp32(qx_res[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s) - ggml_fp16_to_fp32(qx[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s));
//
//         if (first_err < 0 && error > 0.1) {
//             first_err = i;
//         }
//
//         for (size_t j = 0; j < 32; j++) {
//             uint64_t error = std::abs(qx_res[i].qs[j] - qx[i].qs[j]);
//
//             if (first_err < 0 && error > 1) {
//                 first_err = i;
//             }
//         }
//     }
//
//     std::cerr << "TEST QUANTIZE " << ggml_type_name(quant) << " time=" << ms_quant << "ms " << (first_err == -1 ? "CORRECT" : "INCORRECT") << std::endl;
//
//     if (first_err != -1) {
//         std::cerr << "first_error = " << first_err << std::endl;
//         std::cerr << "Actual result: " << std::endl << std::endl;
//         std::cout << "d=" << ggml_fp16_to_fp32(qx[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d) << " s=" << ggml_fp16_to_fp32(qx[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s) << " ";
//         for (size_t j = 0; j < 32; j++) {
//             std::cout << " qs" << j << "=" << (uint32_t)qx[first_err].qs[j] << " ";
//         }
//         std::cerr << std::endl << std::endl << "Expected result: " << std::endl << std::endl;
//         std::cout << "d=" << ggml_fp16_to_fp32(qx_res[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d) << " s=" << ggml_fp16_to_fp32(qx_res[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s) << " ";
//         for (size_t j = 0; j < 32; j++) {
//             std::cout << " qs" << j << "=" << (uint32_t)qx_res[first_err].qs[j] << " ";
//         }
//         std::cerr << std::endl;
//     }
//
//     ggml_vk_destroy_buffer(x_buf);
//     ggml_vk_destroy_buffer(qx_buf);
//
//     free(x);
//     free(qx);
//     free(qx_res);
// }
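
// Self-test: quantized matmul. Quantizes src0 on the CPU, optionally
// quantizes src1 to q8_1 on the GPU (the mmq path), runs the dequant-matmul
// pipeline num_it times, and checks against a CPU ggml_mul_mat reference.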
static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, size_t split_k, size_t shader_size, ggml_type quant, bool mmq = false) {
    VK_LOG_DEBUG("ggml_vk_test_dequant_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << ggml_type_name(quant) << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_matmul_pipeline2 * pipelines;

    if (mmq) {
        pipelines = ctx->device->pipeline_dequant_mul_mat_mat_q8_1;
    } else {
        pipelines = ctx->device->pipeline_dequant_mul_mat_mat;
    }

    const bool fp16acc = ctx->device->fp16;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        p = fp16acc ? pipelines[quant].f16acc->a_s : pipelines[quant].f32acc->a_s;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S";
    } else if (shader_size == 1) {
        p = fp16acc ? pipelines[quant].f16acc->a_m : pipelines[quant].f32acc->a_m;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M";
    } else if (shader_size == 2) {
        p = fp16acc ? pipelines[quant].f16acc->a_l : pipelines[quant].f32acc->a_l;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L";
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = mmq ? 0 : ggml_vk_align_size(k, p->align);

    if (mmq || k != kpad) {
        if (shader_size == 0) {
            p = fp16acc ? pipelines[quant].f16acc->s : pipelines[quant].f32acc->s;
            shname = std::string(ggml_type_name(quant)) + "_S";
        } else if (shader_size == 1) {
            p = fp16acc ? pipelines[quant].f16acc->m : pipelines[quant].f32acc->m;
            shname = std::string(ggml_type_name(quant)) + "_M";
        } else if (shader_size == 2) {
            p = fp16acc ? pipelines[quant].f16acc->l : pipelines[quant].f32acc->l;
            shname = std::string(ggml_type_name(quant)) + "_L";
        } else {
            GGML_ASSERT(0);
        }
    }

    if (p == nullptr) {
        std::cerr << "error: no pipeline for ggml_vk_test_dequant_matmul " << ggml_type_name(quant) << std::endl;
        return;
    }

    const size_t x_sz = sizeof(float) * x_ne;
    const size_t y_sz = sizeof(float) * y_ne;
    const size_t qx_sz = x_ne * ggml_type_size(quant)/ggml_blck_size(quant);
    const size_t qy_sz = mmq ? y_ne * ggml_type_size(GGML_TYPE_Q8_1)/ggml_blck_size(GGML_TYPE_Q8_1) : y_sz;
    const size_t d_sz = sizeof(float) * d_ne;
    float * x = (float *) malloc(x_sz);
    float * y = (float *) malloc(y_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer y_buf = ggml_vk_create_buffer_check(ctx->device, y_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer qy_buf = ggml_vk_create_buffer_check(ctx->device, qy_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer d_buf = ggml_vk_create_buffer_check(ctx->device, d_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    float * d = (float *) malloc(d_sz);
    float * d_chk = (float *) malloc(d_sz);

    for (size_t i = 0; i < x_ne; i++) {
        x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        // x[i] = (i % k == i / k) ? 1.0f : 0.0f;
        // x[i] = i % k;
    }

    ggml_vk_quantize_data(x, qx, x_ne, quant);

    for (size_t i = 0; i < y_ne; i++) {
        y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        // y[i] = (i % k == i / k) ? 1.0f : 0.0f;
        // y[i] = i % k;
    }

    ggml_pipeline_request_descriptor_sets(ctx, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, {vk::MemoryPropertyFlagBits::eDeviceLocal});
        }
    }
    if (mmq) {
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_quantize_q8_1, num_it);
    }

    if (ctx->device->need_compiles) {
        ggml_vk_load_shaders(ctx->device);
    }

    ggml_pipeline_allocate_descriptor_sets(ctx);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);
    ggml_vk_buffer_write(y_buf, 0, y, y_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
    ggml_vk_ctx_begin(ctx->device, subctx);
    if (mmq) {
        for (size_t i = 0; i < num_it; i++) {
            ggml_vk_quantize_q8_1(ctx, subctx, { y_buf, 0, y_sz }, { qy_buf, 0, qy_sz }, y_ne);
            ggml_vk_matmul(
                ctx, subctx, p, { qx_buf, 0, qx_sz }, { qy_buf, 0, qy_sz }, { d_buf, 0, d_sz }, { ctx->prealloc_split_k, 0, ctx->prealloc_size_split_k },
                m, n, k,
                k, k, m, k*m, k*n, m*n,
                split_k, batch, batch, batch, 1, 1, n
            );
        }
    } else {
        for (size_t i = 0; i < num_it; i++) {
            ggml_vk_matmul(
                ctx, subctx, p, { qx_buf, 0, qx_sz }, { y_buf, 0, y_sz }, { d_buf, 0, d_sz }, { ctx->prealloc_split_k, 0, ctx->prealloc_size_split_k },
                m, n, k,
                k, k, m, k*m, k*n, m*n,
                split_k, batch, batch, batch, 1, 1, n
            );
        }
    }
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
    ggml_vk_queue_command_pools_cleanup(ctx->device);

    auto end = std::chrono::high_resolution_clock::now();

    double time_ms = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(d_buf, 0, d, d_sz);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, quant, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, GGML_TYPE_F32, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = qx;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;
        if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }

    avg_err /= m * n;

    double tflops = 2.0*m*n*k*batch*num_it / (time_ms / 1000.0) / (1000.0*1000.0*1000.0*1000.0);

    std::cerr << "TEST dequant matmul " << shname;
    if (mmq) {
        std::cerr << " mmq";
    }
    std::cerr << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time_ms / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;

    if (avg_err > 0.01 || std::isnan(avg_err)) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        std::cerr << "src0: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(x, GGML_TYPE_F32, k, m, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        std::cerr << "src1: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(y, GGML_TYPE_F32, k, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    ggml_vk_destroy_buffer(qx_buf);
    ggml_vk_destroy_buffer(y_buf);
    ggml_vk_destroy_buffer(qy_buf);
    ggml_vk_destroy_buffer(d_buf);

    free(x);
    free(qx);
    free(y);
    free(d);
    free(d_chk);
}
#endif
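
// Lazily (re)allocate the shared scratch buffers (x/y staging, the split_k
// accumulator, and the add_rms partial sums). Buffers only ever grow: an
// existing buffer is destroyed and recreated when the required size exceeds
// its current capacity.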
static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
#if defined(GGML_VULKAN_RUN_TESTS)
    const std::vector<size_t> vals {
        512, 512, 128,
        128, 512, 512,
        4096, 512, 4096,
        11008, 512, 4096,
        4096, 512, 11008,
        32000, 512, 4096,
        8, 8, 8,
        100, 46, 576,
        623, 111, 128,
        100, 46, 558,
        512, 1, 256,
        128, 110, 622,
        511, 511, 127,
        511, 511, 7,
        511, 511, 17,
        49, 49, 128,
        128, 49, 49,
        4096, 49, 4096,
    };
    const size_t num_it = 100;

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q4_0);

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q4_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q4_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q4_0, true);

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q8_0);

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q8_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q8_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q8_0, true);

    abort();

    for (size_t i = 0; i < vals.size(); i += 3) {
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
        std::cerr << '\n';
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2);
        std::cerr << '\n';
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
        std::cerr << '\n' << std::endl;

        if (vals[i + 2] % 32 == 0) {
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_0);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_0);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_0);
            std::cerr << '\n' << std::endl;
        }

        if (vals[i + 2] % 256 == 0) {
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_K);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_K);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_K);
            std::cerr << '\n' << std::endl;
        }
    }

    GGML_ABORT("fatal error");
#endif

    if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(x_size: " << ctx->prealloc_size_x << ")");
        // Resize buffer
        if (ctx->prealloc_x != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_x);
        }
        ctx->prealloc_x = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_x);
    }
    if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(y_size: " << ctx->prealloc_size_y << ")");
        // Resize buffer
        if (ctx->prealloc_y != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_y);
        }
        ctx->prealloc_y = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_y);
    }
    if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(split_k_size: " << ctx->prealloc_size_split_k << ")");
        // Resize buffer
        if (ctx->prealloc_split_k != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_split_k);
        }
        ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_split_k);
    }
    if (ctx->prealloc_add_rms_partials == nullptr || (ctx->prealloc_size_add_rms_partials > 0 && ctx->prealloc_add_rms_partials->size < ctx->prealloc_size_add_rms_partials)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(add_partials_size: " << ctx->prealloc_size_add_rms_partials << ")");
        // Resize buffer
        if (ctx->prealloc_add_rms_partials != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_add_rms_partials);
        }
        ctx->prealloc_add_rms_partials = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_add_rms_partials);
    }
}
static bool ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_cgraph * cgraph, ggml_tensor* tensor, int tensor_idx, bool use_fence, bool almost_ready);

// Returns true if node has enqueued work into the queue, false otherwise
// If submit is true, all operations queued so far are submitted to Vulkan, overlapping command-list creation with GPU execution.
static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool dryrun, bool last_node, bool almost_ready, bool submit) {
    ggml_tensor * node = cgraph->nodes[node_idx];
    if (ggml_is_empty(node) || !node->buffer) {
        return false;
    }

    VK_LOG_DEBUG("ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")");
    ctx->semaphore_idx = 0;

    ggml_tensor * src0 = node->src[0];
    ggml_tensor * src1 = node->src[1];
    ggml_tensor * src2 = node->src[2];
    ggml_tensor * src3 = node->src[3];

    switch (node->op) {
    // Return on empty ops to avoid generating a compute_ctx and setting exit_tensor
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
        return false;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_EXP:
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_ERF:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
        case GGML_UNARY_OP_SIGMOID:
        case GGML_UNARY_OP_HARDSIGMOID:
        case GGML_UNARY_OP_HARDSWISH:
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_GLU:
        switch (ggml_get_glu_op(node)) {
        case GGML_GLU_OP_GEGLU:
        case GGML_GLU_OP_REGLU:
        case GGML_GLU_OP_SWIGLU:
        case GGML_GLU_OP_SWIGLU_OAI:
        case GGML_GLU_OP_GEGLU_ERF:
        case GGML_GLU_OP_GEGLU_QUICK:
            break;
        default:
            return false;
        }
        break;
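    // ADD can fuse with an immediately following RMS_NORM (add_rms fusion);
    // during the dryrun pass this also reserves space for the partial sums.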
    case GGML_OP_ADD:
        {
            int next_node_idx = node_idx + 1 + ctx->num_additional_fused_ops;
            if (next_node_idx < cgraph->n_nodes &&
                cgraph->nodes[next_node_idx]->op == GGML_OP_RMS_NORM &&
                cgraph->nodes[next_node_idx]->src[0] == cgraph->nodes[next_node_idx - 1] &&
                ggml_nrows(cgraph->nodes[next_node_idx]) == 1 &&
                ctx->device->add_rms_fusion) {
                if (dryrun) {
                    ctx->prealloc_size_add_rms_partials += ggml_vk_rms_partials_size(ctx, cgraph->nodes[node_idx]);
                }
                ctx->do_add_rms_partials = true;
            }
        } break;
    case GGML_OP_REPEAT:
    case GGML_OP_REPEAT_BACK:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD_ID:
    case GGML_OP_ACC:
    case GGML_OP_SUB:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SQRT:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_ROLL:
    case GGML_OP_CPY:
    case GGML_OP_SET_ROWS:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_SILU_BACK:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_L2_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
    case GGML_OP_ROPE:
    case GGML_OP_ROPE_BACK:
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_MEAN:
    case GGML_OP_ARGMAX:
    case GGML_OP_COUNT_EQUAL:
    case GGML_OP_IM2COL:
    case GGML_OP_IM2COL_3D:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_CONV_TRANSPOSE_1D:
    case GGML_OP_POOL_2D:
    case GGML_OP_CONV_2D:
    case GGML_OP_CONV_TRANSPOSE_2D:
    case GGML_OP_CONV_2D_DW:
    case GGML_OP_RWKV_WKV6:
    case GGML_OP_RWKV_WKV7:
    case GGML_OP_SSM_SCAN:
    case GGML_OP_SSM_CONV:
    case GGML_OP_LEAKY_RELU:
    case GGML_OP_FLASH_ATTN_EXT:
    case GGML_OP_OPT_STEP_ADAMW:
    case GGML_OP_OPT_STEP_SGD:
        break;
    default:
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl;
        GGML_ABORT("fatal error");
    }

    vk_context compute_ctx;

    if (!dryrun) {
        if (ctx->compute_ctx.expired()) {
            compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
            ctx->compute_ctx = compute_ctx;
            ggml_vk_ctx_begin(ctx->device, compute_ctx);
        } else {
            compute_ctx = ctx->compute_ctx.lock();
        }
    } else {
        switch (node->op) {
        case GGML_OP_REPEAT:
        case GGML_OP_REPEAT_BACK:
        case GGML_OP_ACC:
        case GGML_OP_GET_ROWS:
        case GGML_OP_ADD:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_CONCAT:
        case GGML_OP_UPSCALE:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_SIN:
        case GGML_OP_COS:
        case GGML_OP_CLAMP:
        case GGML_OP_PAD:
        case GGML_OP_CPY:
        case GGML_OP_SET_ROWS:
        case GGML_OP_CONT:
        case GGML_OP_DUP:
        case GGML_OP_SILU_BACK:
        case GGML_OP_NORM:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_L2_NORM:
        case GGML_OP_UNARY:
        case GGML_OP_GLU:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SOFT_MAX_BACK:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_ARGSORT:
        case GGML_OP_SUM:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
        case GGML_OP_COUNT_EQUAL:
        case GGML_OP_IM2COL:
        case GGML_OP_IM2COL_3D:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_CONV_TRANSPOSE_1D:
        case GGML_OP_POOL_2D:
        case GGML_OP_CONV_2D:
        case GGML_OP_CONV_TRANSPOSE_2D:
        case GGML_OP_CONV_2D_DW:
        case GGML_OP_LEAKY_RELU:
        case GGML_OP_OPT_STEP_SGD:
            {
                // These operations all go through ggml_vk_op_f32, so short-circuit and
                // do the only thing needed for the dryrun.
                vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, node, node->op);
                ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
                if (node->op == GGML_OP_RMS_NORM) {
                    ctx->do_add_rms_partials = false;
                }
                return false;
            }
        default:
            break;
        }
    }
    if (!dryrun) {
        // This logic detects dependencies between nodes in the graph and calls ggml_vk_sync_buffers
        // to synchronize them. This handles most "normal" synchronization when computing the graph, and when
        // there is no auxiliary memory use, it shouldn't be necessary to call ggml_vk_sync_buffers
        // outside of this logic. When a node uses one of the prealloc buffers for something like
        // dequantization or split_k, additional synchronization is needed between those passes.
        bool need_sync = false;

        // Check whether "node" requires synchronization. The node requires synchronization if it
        // overlaps in memory with another unsynchronized node and at least one of them is a write.
        // Destination nodes are checked against both the written/read lists. Source nodes are only
        // checked against the written list. Two nodes overlap in memory if they come from the same
        // buffer and the tensor or view ranges overlap.
        auto const &overlaps_unsynced = [&](const ggml_tensor *node, const std::vector<const ggml_tensor *> &unsynced_nodes) -> bool {
            if (unsynced_nodes.size() == 0) {
                return false;
            }
            auto n_base = vk_tensor_offset(node) + node->view_offs;
            auto n_size = ggml_nbytes(node);
            ggml_backend_vk_buffer_context * a_buf_ctx = (ggml_backend_vk_buffer_context *)node->buffer->context;
            vk_buffer a_buf = a_buf_ctx->dev_buffer;
            for (auto &other : unsynced_nodes) {
                ggml_backend_vk_buffer_context * o_buf_ctx = (ggml_backend_vk_buffer_context *)other->buffer->context;
                vk_buffer o_buf = o_buf_ctx->dev_buffer;
                if (a_buf == o_buf) {
                    auto o_base = vk_tensor_offset(other) + other->view_offs;
                    auto o_size = ggml_nbytes(other);

                    if ((o_base <= n_base && n_base < o_base + o_size) ||
                        (n_base <= o_base && o_base < n_base + n_size)) {
                        return true;
                    }
                }
            }
            return false;
        };
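        // The two containment checks above are equivalent to the usual
        // half-open interval overlap test for non-empty ranges, roughly:
        //   n_base < o_base + o_size && o_base < n_base + n_size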
  10093. // For all fused ops, check if the destination node or any of the source
  10094. // nodes require synchronization.
  10095. for (int32_t i = 0; i < ctx->num_additional_fused_ops + 1 && !need_sync; ++i) {
  10096. const ggml_tensor *cur_node = cgraph->nodes[node_idx + i];
  10097. // If the node actually writes to memory, then check if it needs to sync
  10098. if (ctx->fused_ops_write_mask & (1 << i)) {
  10099. if (overlaps_unsynced(cur_node, ctx->unsynced_nodes_read) || overlaps_unsynced(cur_node, ctx->unsynced_nodes_written)) {
  10100. need_sync = true;
  10101. break;
  10102. }
  10103. }
  10104. for (uint32_t j = 0; j < GGML_MAX_SRC; ++j) {
  10105. if (!cur_node->src[j]) {
  10106. continue;
  10107. }
  10108. if (overlaps_unsynced(cur_node->src[j], ctx->unsynced_nodes_written)) {
  10109. need_sync = true;
  10110. break;
  10111. }
  10112. }
  10113. }
  10114. #define ENABLE_SYNC_LOGGING 0
  10115. if (need_sync) {
  10116. #if ENABLE_SYNC_LOGGING
  10117. std::cerr << "sync" << std::endl;
  10118. #endif
  10119. ctx->unsynced_nodes_written.clear();
  10120. ctx->unsynced_nodes_read.clear();
  10121. ggml_vk_sync_buffers(ctx, compute_ctx);
  10122. }
  10123. // Add all fused nodes to the unsynchronized lists.
  10124. for (int32_t i = 0; i < ctx->num_additional_fused_ops + 1; ++i) {
  10125. const ggml_tensor *cur_node = cgraph->nodes[node_idx + i];
  10126. // Multiple outputs could be written, e.g. in topk_moe. Add them all to the list.
  10127. if (ctx->fused_ops_write_mask & (1 << i)) {
  10128. ctx->unsynced_nodes_written.push_back(cur_node);
  10129. }
  10130. for (uint32_t j = 0; j < GGML_MAX_SRC; ++j) {
  10131. if (!cur_node->src[j]) {
  10132. continue;
  10133. }
  10134. ctx->unsynced_nodes_read.push_back(cur_node->src[j]);
  10135. }
  10136. }
  10137. }
  10138. #if ENABLE_SYNC_LOGGING
  10139. if (!dryrun) {
  10140. for (int i = 0; i < ctx->num_additional_fused_ops + 1; ++i) {
  10141. auto *n = cgraph->nodes[node_idx + i];
  10142. std::cerr << node_idx + i << " " << ggml_op_name(n->op) << " " << n->name;
  10143. if (n->op == GGML_OP_GLU) {
  10144. std::cerr << " " << ggml_glu_op_name(ggml_get_glu_op(n)) << " " << (n->src[1] ? "split" : "single") << " ";
  10145. }
  10146. std::cerr << std::endl;
  10147. }
  10148. }
  10149. #endif

    switch (node->op) {
    case GGML_OP_REPEAT:
        ggml_vk_repeat(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_REPEAT_BACK:
        ggml_vk_repeat_back(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_ACC:
        ggml_vk_acc(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_GET_ROWS:
        ggml_vk_get_rows(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_ADD:
        if (ctx->num_additional_fused_ops) {
            ggml_vk_multi_add(ctx, compute_ctx, cgraph, node_idx, dryrun);
        } else {
            ggml_vk_add(ctx, compute_ctx, src0, src1, node, dryrun);
        }
        break;
    case GGML_OP_SUB:
        ggml_vk_sub(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_MUL:
        ggml_vk_mul(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_DIV:
        ggml_vk_div(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_ADD_ID:
        ggml_vk_add_id(ctx, compute_ctx, src0, src1, src2, node, dryrun);
        break;
    case GGML_OP_CONCAT:
        ggml_vk_concat(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_UPSCALE:
        ggml_vk_upscale(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SCALE:
        ggml_vk_scale(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SQR:
        ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SQRT:
        ggml_vk_sqrt(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SIN:
        ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_COS:
        ggml_vk_cos(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_CLAMP:
        ggml_vk_clamp(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_PAD:
        ggml_vk_pad(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_ROLL:
        ggml_vk_roll(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        ggml_vk_cpy(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SET_ROWS:
        ggml_vk_set_rows(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_SILU_BACK:
        ggml_vk_silu_back(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_NORM:
        ggml_vk_norm(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_GROUP_NORM:
        ggml_vk_group_norm(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_RMS_NORM:
        if (ctx->num_additional_fused_ops > 0) {
            // fused rms_norm + mul
            ggml_tensor *mul = cgraph->nodes[node_idx + 1];
            ggml_tensor *other_src = mul->src[0] == node ? mul->src[1] : mul->src[0];
            ggml_vk_rms_norm(ctx, compute_ctx, src0, other_src, mul, (float *)node->op_params, dryrun);
        } else {
            ggml_vk_rms_norm(ctx, compute_ctx, src0, src0, node, (float *)node->op_params, dryrun);
        }
        break;
    case GGML_OP_RMS_NORM_BACK:
        ggml_vk_rms_norm_back(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_L2_NORM:
        ggml_vk_l2_norm(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_EXP:
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_ERF:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
        case GGML_UNARY_OP_SIGMOID:
        case GGML_UNARY_OP_HARDSIGMOID:
        case GGML_UNARY_OP_HARDSWISH:
            ggml_vk_unary(ctx, compute_ctx, src0, node, dryrun);
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_GLU:
        switch (ggml_get_glu_op(node)) {
        case GGML_GLU_OP_GEGLU:
        case GGML_GLU_OP_REGLU:
        case GGML_GLU_OP_SWIGLU:
        case GGML_GLU_OP_SWIGLU_OAI:
        case GGML_GLU_OP_GEGLU_ERF:
        case GGML_GLU_OP_GEGLU_QUICK:
            ggml_vk_glu(ctx, compute_ctx, src0, src1, node, dryrun);
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_DIAG_MASK_INF:
        ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SOFT_MAX:
        if (ctx->num_additional_fused_ops) {
            ggml_vk_topk_moe(ctx, compute_ctx, cgraph, node_idx, dryrun);
        } else {
            ggml_vk_soft_max(ctx, compute_ctx, src0, src1, src2, node, dryrun);
        }
        break;
    case GGML_OP_SOFT_MAX_BACK:
        ggml_vk_soft_max_back(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_ROPE:
        ggml_vk_rope(ctx, compute_ctx, cgraph, node_idx, false, dryrun);
        break;
    case GGML_OP_ROPE_BACK:
        ggml_vk_rope(ctx, compute_ctx, cgraph, node_idx, true, dryrun);
        break;
    case GGML_OP_ARGSORT:
        if (ctx->num_additional_fused_ops) {
            ggml_vk_topk_moe(ctx, compute_ctx, cgraph, node_idx, dryrun);
        } else {
            ggml_vk_argsort(ctx, compute_ctx, src0, node, dryrun);
        }
        break;
    case GGML_OP_SUM:
        ggml_vk_sum(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SUM_ROWS:
        ggml_vk_sum_rows(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_MEAN:
        ggml_vk_mean(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_ARGMAX:
        ggml_vk_argmax(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_COUNT_EQUAL:
        ggml_vk_count_equal(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_IM2COL:
        ggml_vk_im2col(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_IM2COL_3D:
        ggml_vk_im2col_3d(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        ggml_vk_timestep_embedding(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_CONV_TRANSPOSE_1D:
        ggml_vk_conv_transpose_1d(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_POOL_2D:
        ggml_vk_pool_2d(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_CONV_2D:
        ggml_vk_conv_2d(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_CONV_TRANSPOSE_2D:
        ggml_vk_conv_transpose_2d(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_CONV_2D_DW:
        ggml_vk_conv_2d_dw(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_LEAKY_RELU:
        ggml_vk_leaky_relu(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_MUL_MAT:
        ggml_vk_mul_mat(ctx, compute_ctx, cgraph, node_idx, dryrun);
        break;
    case GGML_OP_MUL_MAT_ID:
        ggml_vk_mul_mat_id(ctx, compute_ctx, cgraph, node_idx, dryrun);
        break;
    case GGML_OP_FLASH_ATTN_EXT:
        ggml_vk_flash_attn(ctx, compute_ctx, src0, src1, src2, src3, node->src[4], node, dryrun);
        break;
    case GGML_OP_RWKV_WKV6:
        ggml_vk_rwkv_wkv6(ctx, compute_ctx, node, dryrun);
        break;
    case GGML_OP_RWKV_WKV7:
        ggml_vk_rwkv_wkv7(ctx, compute_ctx, node, dryrun);
        break;
    case GGML_OP_SSM_SCAN:
        ggml_vk_ssm_scan(ctx, compute_ctx, node, dryrun);
        break;
    case GGML_OP_SSM_CONV:
        ggml_vk_ssm_conv(ctx, compute_ctx, node, dryrun);
        break;
    case GGML_OP_OPT_STEP_ADAMW:
        ggml_vk_opt_step_adamw(ctx, compute_ctx, node, dryrun);
        break;
    case GGML_OP_OPT_STEP_SGD:
        ggml_vk_opt_step_sgd(ctx, compute_ctx, src0, src1, src2, node, dryrun);
        break;
    default:
        return false;
    }
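
    // In a dry run the handlers above only record pipeline and descriptor-set
    // requirements (any missing shaders are compiled later); nothing has been
    // recorded into a command buffer, so there is nothing to submit.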
    if (dryrun) {
        return false;
    }

    ctx->tensor_ctxs[node_idx] = compute_ctx;

#if defined(GGML_VULKAN_CHECK_RESULTS)
    // Force context reset on each node so that each tensor ends up in its own context
    // and can be run and compared to its CPU equivalent separately
    last_node = true;
#endif

    if (submit || last_node) {
        ggml_vk_ctx_end(compute_ctx);

        // TODO probably it'd be better to pass an exit_node flag to ggml_vk_compute_forward
        if (last_node) {
            compute_ctx->exit_tensor_idx = node_idx_begin;
        } else {
            compute_ctx->exit_tensor_idx = -1;
        }

        ctx->compute_ctx.reset();

        bool ok = ggml_vk_compute_forward(ctx, cgraph, node_begin, node_idx_begin, false, almost_ready);
        if (!ok) {
            if (node->op == GGML_OP_UNARY) {
                std::cerr << __func__ << ": error: op not supported UNARY " << node->name << " (" << ggml_unary_op_name(static_cast<ggml_unary_op>(node->op_params[0])) << ")" << std::endl;
            } else if (node->op == GGML_OP_GLU) {
                std::cerr << __func__ << ": error: op not supported GLU " << node->name << " (" << ggml_glu_op_name(static_cast<ggml_glu_op>(node->op_params[0])) << ")" << std::endl;
            } else {
                std::cerr << __func__ << ": error: op not supported " << node->name << " (" << ggml_op_name(node->op) << ")" << std::endl;
            }
        }
    }

    return true;
}
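
// Submit the command buffers recorded in the context that owns tensor_idx and,
// for the exit tensor of the final context, wait on the fence and perform the
// staged device-to-host copies.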
static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, ggml_tensor * tensor, int tensor_idx, bool use_fence = true, bool almost_ready = false) {
    GGML_UNUSED(cgraph);
    ggml_backend_buffer * buf = nullptr;

    switch (tensor->op) {
    case GGML_OP_ADD:
    case GGML_OP_ACC:
    case GGML_OP_GET_ROWS:
    case GGML_OP_SUB:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_ADD_ID:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SQRT:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_ROLL:
    case GGML_OP_CPY:
    case GGML_OP_SET_ROWS:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_SILU_BACK:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_L2_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
    case GGML_OP_ROPE:
    case GGML_OP_ROPE_BACK:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_MEAN:
    case GGML_OP_ARGMAX:
    case GGML_OP_COUNT_EQUAL:
    case GGML_OP_IM2COL:
    case GGML_OP_IM2COL_3D:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_CONV_TRANSPOSE_1D:
    case GGML_OP_POOL_2D:
    case GGML_OP_CONV_2D:
    case GGML_OP_CONV_TRANSPOSE_2D:
    case GGML_OP_CONV_2D_DW:
    case GGML_OP_RWKV_WKV6:
    case GGML_OP_RWKV_WKV7:
    case GGML_OP_SSM_SCAN:
    case GGML_OP_SSM_CONV:
    case GGML_OP_LEAKY_RELU:
    case GGML_OP_REPEAT:
    case GGML_OP_REPEAT_BACK:
    case GGML_OP_OPT_STEP_ADAMW:
    case GGML_OP_OPT_STEP_SGD:
        buf = tensor->buffer;
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(tensor)) {
        case GGML_UNARY_OP_EXP:
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_ERF:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
        case GGML_UNARY_OP_SIGMOID:
        case GGML_UNARY_OP_HARDSIGMOID:
        case GGML_UNARY_OP_HARDSWISH:
            buf = tensor->buffer;
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_GLU:
        switch (ggml_get_glu_op(tensor)) {
        case GGML_GLU_OP_GEGLU:
        case GGML_GLU_OP_REGLU:
        case GGML_GLU_OP_SWIGLU:
        case GGML_GLU_OP_SWIGLU_OAI:
        case GGML_GLU_OP_GEGLU_ERF:
        case GGML_GLU_OP_GEGLU_QUICK:
            buf = tensor->buffer;
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
    case GGML_OP_FLASH_ATTN_EXT:
        buf = tensor->buffer;
        break;
    default:
        return false;
    }

    if (buf == nullptr) {
        return false;
    }

    VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");

    vk_context subctx = ctx->tensor_ctxs[tensor_idx].lock();

    // always wait for the GPU work to be done for the last submit
    if (tensor_idx == subctx->exit_tensor_idx) {
        use_fence = true;
    }

    // Only run if ctx hasn't been submitted yet
    if (!subctx->seqs.empty()) {
#ifdef GGML_VULKAN_CHECK_RESULTS
        ggml_vk_check_results_0(ctx, cgraph, tensor_idx);
        use_fence = true;
#endif

        // Do staging buffer copies
        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        for (auto& mset : subctx->memsets) {
            memset(mset.dst, mset.val, mset.n);
        }
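
        // Use the dedicated almost_ready fence when this submit only needs the
        // "almost ready" signal; otherwise submit with the main fence (or none).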
        if (almost_ready && !ctx->almost_ready_fence_pending && !use_fence) {
            ggml_vk_submit(subctx, ctx->almost_ready_fence);
            ctx->almost_ready_fence_pending = true;
        } else {
            ggml_vk_submit(subctx, use_fence ? ctx->fence : vk::Fence{});
        }

        if (use_fence) {
            ggml_vk_wait_for_fence(ctx);
        }
#ifdef GGML_VULKAN_CHECK_RESULTS
        ggml_vk_check_results_1(ctx, cgraph, tensor_idx);
#endif
    }

    if (tensor_idx == subctx->exit_tensor_idx) {
        // Do staging buffer copies
        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        subctx->in_memcpys.clear();
        subctx->out_memcpys.clear();
        subctx->memsets.clear();
    }

    return true;
}

// Clean up after graph processing is done
static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_graph_cleanup()");
    ctx->prealloc_y_last_pipeline_used = {};

    ctx->unsynced_nodes_written.clear();
    ctx->unsynced_nodes_read.clear();
    ctx->prealloc_x_need_sync = ctx->prealloc_y_need_sync = ctx->prealloc_split_k_need_sync = false;

    ggml_vk_command_pool_cleanup(ctx->device, ctx->compute_cmd_pool);
    ggml_vk_command_pool_cleanup(ctx->device, ctx->transfer_cmd_pool);

    for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s });
    }
    ctx->gc.semaphores.clear();

    for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
    }
    ctx->gc.tl_semaphores.clear();
    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.resetEvent(event);
    }

    ctx->tensor_ctxs.clear();
    ctx->gc.contexts.clear();
    ctx->pipeline_descriptor_set_requirements = 0;
    ctx->descriptor_set_idx = 0;
}

// Clean up on backend free
static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->name << ")");
    ggml_vk_graph_cleanup(ctx);

    ggml_vk_destroy_buffer(ctx->prealloc_x);
    ggml_vk_destroy_buffer(ctx->prealloc_y);
    ggml_vk_destroy_buffer(ctx->prealloc_split_k);
    ctx->prealloc_y_last_pipeline_used = nullptr;

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.destroyEvent(event);
    }
    ctx->gc.events.clear();

    ctx->device->device.destroyFence(ctx->fence);
    ctx->device->device.destroyFence(ctx->almost_ready_fence);

    for (auto& pool : ctx->descriptor_pools) {
        ctx->device->device.destroyDescriptorPool(pool);
    }
    ctx->descriptor_pools.clear();
    ctx->descriptor_sets.clear();

    ctx->compute_cmd_pool.destroy(ctx->device->device);
    ctx->transfer_cmd_pool.destroy(ctx->device->device);
}

static int ggml_vk_get_device_count() {
    ggml_vk_instance_init();

    return vk_instance.device_indices.size();
}

static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    vk::PhysicalDeviceProperties props;
    devices[device].getProperties(&props);

    snprintf(description, description_size, "%s", props.deviceName.data());
}

// backend interface

#define UNUSED GGML_UNUSED

// device backend

static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
    return buffer->buft->iface.get_name == ggml_backend_vk_buffer_type_name;
}

static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_free_buffer()");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    ggml_vk_destroy_buffer(ctx->dev_buffer);
    delete ctx;
}

static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
    return vk_ptr_base;

    UNUSED(buffer);
}

static enum ggml_status ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
    if (tensor->view_src != nullptr) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
    }
    return GGML_STATUS_SUCCESS;
}

static void ggml_backend_vk_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_memset_tensor(" << buffer << ", " << tensor << ", " << value << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    vk_buffer buf = buf_ctx->dev_buffer;
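    // Replicate the byte into all four lanes of a 32-bit word (e.g. 0xAB becomes
    // 0xABABABAB), since the device-side fill writes whole 32-bit words.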
    uint32_t val32 = (uint32_t)value * 0x01010101;
    ggml_vk_buffer_memset(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, val32, size);
}

static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_write(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_read(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
    if (ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;

        vk_buffer src_buf = src_buf_ctx->dev_buffer;
        vk_buffer dst_buf = dst_buf_ctx->dev_buffer;

        ggml_vk_buffer_copy(dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));

        return true;
    }
    return false;

    UNUSED(buffer);
}

static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    ggml_vk_buffer_memset(ctx->dev_buffer, 0, value, buffer->size);
}

static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
    /* .free_buffer   = */ ggml_backend_vk_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_vk_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_vk_buffer_init_tensor,
    /* .memset_tensor = */ ggml_backend_vk_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_vk_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_vk_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_vk_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_vk_buffer_clear,
    /* .reset         = */ NULL,
};

// vk buffer type
static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return ctx->name.c_str();
}

static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")");
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;

    vk_buffer dev_buffer = nullptr;
    try {
        dev_buffer = ggml_vk_create_buffer_device(ctx->device, size);
    } catch (const vk::SystemError& e) {
        return nullptr;
    }

    ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->device, std::move(dev_buffer), ctx->name);

    return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
}

static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->properties.limits.minStorageBufferOffsetAlignment;
}

static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->suballocation_block_size;
}

static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
    ggml_vk_instance_init();

    VK_LOG_DEBUG("ggml_backend_vk_buffer_type(" << dev_num << ")");

    vk_device dev = ggml_vk_get_device(dev_num);

    return &dev->buffer_type;
}

// host buffer type

static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_VK_NAME "_Host";

    UNUSED(buft);
}

static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
    return GGML_VK_NAME "_Host";

    UNUSED(buffer);
}

static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()");
    ggml_vk_host_free(vk_instance.devices[0], buffer->context);
}

static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")");

    size += 32;  // Behave like the CPU buffer type

    void * ptr = nullptr;
    try {
        ptr = ggml_vk_host_malloc(vk_instance.devices[0], size);
    } catch (vk::SystemError& e) {
        GGML_LOG_WARN("ggml_vulkan: Failed to allocate pinned memory (%s)\n", e.what());
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;

    return buffer;

    UNUSED(buft);
}

static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return vk_instance.devices[0]->properties.limits.minMemoryMapAlignment;

    UNUSED(buft);
}

static size_t ggml_backend_vk_host_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    return vk_instance.devices[0]->suballocation_block_size;

    UNUSED(buft);
}

// Should be changed to return device-specific host buffer type
// but that probably requires changes in llama.cpp
ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name       = */ ggml_backend_vk_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_vk_host_buffer_type_get_alignment,
            /* .get_max_size   = */ ggml_backend_vk_host_buffer_type_get_max_size,
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .device   = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), 0),
        /* .context  = */ nullptr,
    };

    // Make sure device 0 is initialized
    ggml_vk_instance_init();
    ggml_vk_get_device(0);

    return &ggml_backend_vk_buffer_type_host;
}

// backend

static const char * ggml_backend_vk_name(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return ctx->name.c_str();
}

static void ggml_backend_vk_free(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    VK_LOG_DEBUG("ggml_backend_vk_free(" << ctx->name << ")");

    ggml_vk_cleanup(ctx);

    delete ctx;
    delete backend;
}

static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return &ctx->device->buffer_type;
}

static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_set_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_write_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_get_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_read_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if ((dst->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;

        vk_context transfer_ctx;

        if (ctx->transfer_ctx.expired()) {
            // Initialize new transfer context
            transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool);
            ctx->transfer_ctx = transfer_ctx;
            ggml_vk_ctx_begin(ctx->device, transfer_ctx);
        } else {
            transfer_ctx = ctx->transfer_ctx.lock();
        }

        vk_buffer src_buf = src_buf_ctx->dev_buffer;
        vk_buffer dst_buf = dst_buf_ctx->dev_buffer;

        ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
        return true;
    }

    return false;
}

static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
    VK_LOG_DEBUG("ggml_backend_vk_synchronize()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if (ctx->transfer_ctx.expired()) {
        return;
    }

    vk_context transfer_ctx = ctx->transfer_ctx.lock();

    ggml_vk_ctx_end(transfer_ctx);

    for (auto& cpy : transfer_ctx->in_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ggml_vk_submit(transfer_ctx, ctx->fence);
    ggml_vk_wait_for_fence(ctx);

    for (auto& cpy : transfer_ctx->out_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ctx->transfer_ctx.reset();
}

static bool ggml_vk_is_empty(ggml_tensor * node) {
    return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
}

static bool ggml_vk_can_fuse(const ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops) {
    if (!ggml_can_fuse(cgraph, node_idx, ops)) {
        return false;
    }

    if (ops.size() == 2 && ops.begin()[0] == GGML_OP_RMS_NORM && ops.begin()[1] == GGML_OP_MUL) {
        // additional constraints specific to this fusion
        const ggml_tensor *rms_norm = cgraph->nodes[node_idx];
        const ggml_tensor *mul      = cgraph->nodes[node_idx + 1];

        GGML_ASSERT(rms_norm->src[0]->type == GGML_TYPE_F32);
        GGML_ASSERT(rms_norm->type == GGML_TYPE_F32);
        // rms_norm only supports f32
        if (mul->src[0]->type != GGML_TYPE_F32 ||
            mul->src[1]->type != GGML_TYPE_F32 ||
            mul->type != GGML_TYPE_F32) {
            return false;
        }
        // if rms_norm is the B operand, then we don't handle broadcast
        if (rms_norm == mul->src[1] &&
            !ggml_are_same_shape(mul->src[0], rms_norm)) {
            return false;
        }
        // rms_norm shader assumes contiguous rows
        if (!ggml_is_contiguous_rows(mul->src[0]) || !ggml_is_contiguous_rows(mul->src[1])) {
            return false;
        }
    }
    if (ops.size() == 2 && ops.begin()[0] == GGML_OP_MUL_MAT && ops.begin()[1] == GGML_OP_ADD) {
        // additional constraints specific to this fusion
        const ggml_tensor *mul  = cgraph->nodes[node_idx];
        const ggml_tensor *add  = cgraph->nodes[node_idx + 1];
        const ggml_tensor *bias = add->src[0] == mul ? add->src[1] : add->src[0];

        // mat-vec only
        if (ggml_nrows(mul) != 1) {
            return false;
        }
        // shaders assume the types match
        if (mul->type != bias->type) {
            return false;
        }
        // shaders reuse the D shape for bias
        if (!ggml_are_same_shape(mul, bias) ||
            !ggml_are_same_stride(mul, bias)) {
            return false;
        }
        // unaligned bias isn't handled
        if (get_misalign_bytes(ctx, bias) != 0) {
            return false;
        }
    }
    if (ops.size() == 2 && ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_ADD_ID) {
        // additional constraints specific to this fusion
        const ggml_tensor *mul  = cgraph->nodes[node_idx];
        const ggml_tensor *add  = cgraph->nodes[node_idx + 1];
        const ggml_tensor *bias = add->src[1];

        if (mul != add->src[0]) {
            return false;
        }
        // mat-vec only
        if (!ggml_vk_use_mul_mat_vec_id(cgraph, node_idx)) {
            return false;
        }
        // shaders assume the types match
        if (mul->type != bias->type) {
            return false;
        }
        // shaders assume the bias is contiguous
        if (!ggml_is_contiguous(bias)) {
            return false;
        }
        // the ID tensor must be the same for mul_mat_id and add_id
        if (mul->src[2] != add->src[2]) {
            return false;
        }
        // unaligned bias isn't handled
        if (get_misalign_bytes(ctx, bias) != 0) {
            return false;
        }
    }

    return true;
}
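
// Check whether the topk_moe subgraph starting at node_idx can run as the fused
// topk_moe shader; the softmax/weights node offsets below correspond to the node
// layout of each fusion pattern.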
static bool ggml_vk_can_fuse_topk_moe(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph,
                                      int node_idx, topk_moe_mode mode) {
    const ggml_tensor * softmax;
    const ggml_tensor * weights;

    switch (mode) {
    case TOPK_MOE_EARLY_SOFTMAX_NORM:
        softmax = cgraph->nodes[node_idx + 0];
        weights = cgraph->nodes[node_idx + 9];
        break;
    case TOPK_MOE_EARLY_SOFTMAX:
        softmax = cgraph->nodes[node_idx + 0];
        weights = cgraph->nodes[node_idx + 4];
        break;
    case TOPK_MOE_LATE_SOFTMAX:
        softmax = cgraph->nodes[node_idx + 4];
        weights = cgraph->nodes[node_idx + 5];
        break;
    default:
        return false;
    }

    const float * op_params = (const float *)softmax->op_params;

    float scale    = op_params[0];
    float max_bias = op_params[1];

    if (!ggml_is_contiguous(softmax->src[0]) || !ggml_is_contiguous(weights)) {
        return false;
    }

    if (scale != 1.0f || max_bias != 0.0f) {
        return false;
    }

    // don't fuse when masks or sinks are present
    if (softmax->src[1] || softmax->src[2]) {
        return false;
    }

    const int n_expert = softmax->ne[0];
    // n_expert must be a power of 2
    if (!is_pow2(n_expert) || n_expert > (1 << (num_topk_moe_pipelines-1))) {
        return false;
    }

    if (!ctx->device->subgroup_arithmetic ||
        !ctx->device->subgroup_shuffle ||
        !ctx->device->subgroup_require_full_support ||
        ctx->device->disable_fusion) {
        return false;
    }

    return true;
}
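
// Check whether a ROPE -> VIEW -> SET_ROWS chain starting at node_idx can be
// fused, so the rope shader scatters its output rows directly instead of running
// a separate set_rows dispatch.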
static bool ggml_vk_can_fuse_rope_set_rows(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph,
                                           int node_idx) {
    GGML_UNUSED(ctx);
    const ggml_tensor *rope     = cgraph->nodes[node_idx + 0];
    const ggml_tensor *view     = cgraph->nodes[node_idx + 1];
    const ggml_tensor *set_rows = cgraph->nodes[node_idx + 2];

    // ne3 not tested
    if (rope->src[0]->ne[3] != 1) {
        return false;
    }

    if (set_rows->type != GGML_TYPE_F32 && set_rows->type != GGML_TYPE_F16) {
        return false;
    }

    if (set_rows->src[1]->type != GGML_TYPE_I64) {
        return false;
    }

    // The view should flatten two dims of rope into one dim
    if (!ggml_is_contiguous(view) ||
        view->ne[0] != rope->ne[0] * rope->ne[1]) {
        return false;
    }

    // Only norm/neox shaders have the fusion code
    const int mode = ((const int32_t *) rope->op_params)[2];
    if (mode != GGML_ROPE_TYPE_NORMAL && mode != GGML_ROPE_TYPE_NEOX) {
        return false;
    }

    return true;
}
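
// Count how many consecutive GGML_OP_ADD nodes starting at node_idx can fold
// into one multi_add dispatch. Returns the total count (at least 2) on success,
// or 0 when there is nothing to fuse.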
static uint32_t ggml_vk_fuse_multi_add(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph, int node_idx) {
    const ggml_tensor *first_node = cgraph->nodes[node_idx];
    if (first_node->op != GGML_OP_ADD) {
        return 0;
    }

    if (!ctx->device->multi_add) {
        return 0;
    }

    int32_t num_adds = 1;
    while (node_idx + num_adds < cgraph->n_nodes &&
           cgraph->nodes[node_idx + num_adds]->op == GGML_OP_ADD &&
           num_adds < MAX_FUSED_ADDS) {
        num_adds++;
    }

    // The shader currently requires same shapes (but different strides are allowed),
    // everything f32, and no misalignment
    for (int32_t i = 0; i < num_adds; ++i) {
        const ggml_tensor *next_node = cgraph->nodes[node_idx + i];
        if (!ggml_are_same_shape(first_node, next_node->src[0]) ||
            !ggml_are_same_shape(first_node, next_node->src[1]) ||
            next_node->type != GGML_TYPE_F32 ||
            next_node->src[0]->type != GGML_TYPE_F32 ||
            next_node->src[1]->type != GGML_TYPE_F32 ||
            get_misalign_bytes(ctx, next_node) ||
            get_misalign_bytes(ctx, next_node->src[0]) ||
            get_misalign_bytes(ctx, next_node->src[1])) {
            num_adds = i;
        }
    }

    // Verify we can fuse these
    ggml_op adds[MAX_FUSED_ADDS];
    for (int32_t i = 0; i < num_adds; ++i) {
        adds[i] = GGML_OP_ADD;
    }

    // decrease num_adds if they can't all be fused
    while (num_adds > 1 && !ggml_can_fuse(cgraph, node_idx, adds, num_adds)) {
        num_adds--;
    }

    // a single add is not "fused", so just return zero
    if (num_adds == 1) {
        return 0;
    }

    return num_adds;
}
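
// Graph execution happens in two passes over the nodes: a dry-run pass that
// sizes preallocations, requests descriptor sets and triggers any missing
// pipeline compiles, then a second pass that records the command buffers and
// submits them in batches.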
static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    if (vk_instance.debug_utils_support) {
        vk::DebugUtilsLabelEXT dul = {};
        dul.pLabelName = "ggml_backend_vk_graph_compute";
        dul.color = std::array<float,4>{1.0f, 1.0f, 1.0f, 1.0f};
        vk_instance.pfn_vkQueueBeginDebugUtilsLabelEXT(ctx->device->compute_queue.queue, reinterpret_cast<VkDebugUtilsLabelEXT*>(&dul));
    }

    ctx->prealloc_size_add_rms_partials = 0;
    ctx->prealloc_size_add_rms_partials_offset = 0;
    ctx->do_add_rms_partials = false;

    uint64_t total_mat_mul_bytes = 0;
    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (!ctx->device->disable_fusion) {
            uint32_t num_adds = ggml_vk_fuse_multi_add(ctx, cgraph, i);
            if (num_adds) {
                ctx->num_additional_fused_ops = num_adds - 1;
            } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) {
                ctx->num_additional_fused_ops = 1;
            } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT, GGML_OP_ADD })) {
                ctx->num_additional_fused_ops = 1;
            } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID })) {
                ctx->num_additional_fused_ops = 1;
            } else if (ggml_can_fuse_subgraph(cgraph, i, { GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }, { i + 2 }) &&
                       ggml_check_edges(cgraph, i, rope_view_set_rows_edges) &&
                       ggml_vk_can_fuse_rope_set_rows(ctx, cgraph, i)) {
                ctx->num_additional_fused_ops = 2;
            } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_early_softmax_norm, { i + 3, i + 9 }) &&
                       ggml_check_edges(cgraph, i, topk_moe_early_softmax_norm_edges) &&
                       ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_EARLY_SOFTMAX_NORM)) {
                ctx->num_additional_fused_ops = topk_moe_early_softmax_norm.size() - 1;
            } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_early_softmax, { i + 3, i + 4 }) &&
                       ggml_check_edges(cgraph, i, topk_moe_early_softmax_edges) &&
                       ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_EARLY_SOFTMAX)) {
                ctx->num_additional_fused_ops = topk_moe_early_softmax.size() - 1;
            } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_late_softmax, { i + 1, i + 5 }) &&
                       ggml_check_edges(cgraph, i, topk_moe_late_softmax_edges) &&
                       ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_LATE_SOFTMAX)) {
                ctx->num_additional_fused_ops = topk_moe_late_softmax.size() - 1;
            }
        }
        ggml_vk_build_graph(ctx, cgraph, i, nullptr, 0, true, false, false, false);
        if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) {
            total_mat_mul_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]);
        } else if (cgraph->nodes[i]->op == GGML_OP_CONV_2D || cgraph->nodes[i]->op == GGML_OP_CONV_TRANSPOSE_2D) {
            // Count CRS x NPQ x sizeof(*) bytes, to account for as many bytes as mul_mat would use in im2col->mul_mat mode.
            auto CRS_size =
                cgraph->nodes[i]->src[0]->ne[0] * cgraph->nodes[i]->src[0]->ne[1] * cgraph->nodes[i]->src[1]->ne[2];
            auto NPQ_size = cgraph->nodes[i]->ne[0] * cgraph->nodes[i]->ne[1] * cgraph->nodes[i]->ne[3];
            total_mat_mul_bytes += NPQ_size * CRS_size * ggml_type_size(cgraph->nodes[i]->type);
        }
        i += ctx->num_additional_fused_ops;
        ctx->num_additional_fused_ops = 0;
    }
    if (ctx->device->need_compiles) {
        ggml_vk_load_shaders(ctx->device);
    }
    ggml_vk_preallocate_buffers(ctx);
    ggml_pipeline_allocate_descriptor_sets(ctx);

    int last_node = cgraph->n_nodes - 1;

    // If the last op in the cgraph isn't run on the GPU, the command buffer doesn't get closed properly
    while (last_node > 0 && ggml_vk_is_empty(cgraph->nodes[last_node])) {
        last_node -= 1;
    }

    // Reserve tensor context space for all nodes
    ctx->tensor_ctxs.resize(cgraph->n_nodes);

    bool first_node_in_batch = true; // true if next node will be first node in a batch
    int submit_node_idx = 0; // index to first node in a batch

    vk_context compute_ctx;

    if (vk_perf_logger_enabled) {
        // allocate/resize the query pool
        if (ctx->device->num_queries < cgraph->n_nodes + 1) {
            if (ctx->device->query_pool) {
                ctx->device->device.destroyQueryPool(ctx->device->query_pool);
            }
            vk::QueryPoolCreateInfo query_create_info;
            query_create_info.queryType = vk::QueryType::eTimestamp;
            query_create_info.queryCount = cgraph->n_nodes + 100;
            ctx->device->query_pool = ctx->device->device.createQueryPool(query_create_info);
            ctx->device->num_queries = query_create_info.queryCount;
        }

        ctx->device->device.resetQueryPool(ctx->device->query_pool, 0, cgraph->n_nodes+1);

        GGML_ASSERT(ctx->compute_ctx.expired());
        compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
        ctx->compute_ctx = compute_ctx;
        ggml_vk_ctx_begin(ctx->device, compute_ctx);
        compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, 0);
    }

    ctx->prealloc_y_last_pipeline_used = nullptr;
    ctx->prealloc_y_last_tensor_used = nullptr;

    if (ctx->prealloc_size_add_rms_partials) {
        if (ctx->compute_ctx.expired()) {
            compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
            ctx->compute_ctx = compute_ctx;
            ggml_vk_ctx_begin(ctx->device, compute_ctx);
        } else {
            compute_ctx = ctx->compute_ctx.lock();
        }
        // initialize partial sums to zero.
        ggml_vk_buffer_memset_async(compute_ctx, ctx->prealloc_add_rms_partials, 0, 0, ctx->prealloc_size_add_rms_partials);
        ggml_vk_sync_buffers(ctx, compute_ctx);
    }

    // Submit after enough work has accumulated, to overlap CPU cmdbuffer generation with GPU execution.
    // Estimate the amount of matmul work by looking at the weight matrix size, and submit every 100MB
    // (and scaled down based on model size, so smaller models submit earlier).
    // Also submit at least every 100 nodes, in case there are workloads without as much matmul.
    int nodes_per_submit = 100;
    int submitted_nodes = 0;
    int submit_count = 0;
    uint64_t mul_mat_bytes = 0;
    uint64_t mul_mat_bytes_per_submit = std::min(uint64_t(100*1000*1000), total_mat_mul_bytes / 40u);
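    // E.g. with ~4 GB of weights this hits the 100 MB cap, while a ~400 MB model
    // submits about every 10 MB.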

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (first_node_in_batch) {
            submit_node_idx = i;
        }

        if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) {
            mul_mat_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]);
        }

        if (!ctx->device->disable_fusion) {
            uint32_t num_adds = ggml_vk_fuse_multi_add(ctx, cgraph, i);
            if (num_adds) {
                ctx->num_additional_fused_ops = num_adds - 1;
            } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) {
                ctx->num_additional_fused_ops = 1;
            } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT, GGML_OP_ADD })) {
                ctx->num_additional_fused_ops = 1;
            } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID })) {
                ctx->num_additional_fused_ops = 1;
            } else if (ggml_can_fuse_subgraph(cgraph, i, { GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }, { i + 2 }) &&
                       ggml_check_edges(cgraph, i, rope_view_set_rows_edges) &&
                       ggml_vk_can_fuse_rope_set_rows(ctx, cgraph, i)) {
                ctx->num_additional_fused_ops = 2;
            } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_early_softmax_norm, { i + 3, i + 9 }) &&
                       ggml_check_edges(cgraph, i, topk_moe_early_softmax_norm_edges) &&
                       ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_EARLY_SOFTMAX_NORM)) {
                ctx->num_additional_fused_ops = topk_moe_early_softmax_norm.size() - 1;
                // view of argsort writes to memory
                ctx->fused_ops_write_mask |= 1 << 3;
            } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_early_softmax, { i + 3, i + 4 }) &&
                       ggml_check_edges(cgraph, i, topk_moe_early_softmax_edges) &&
                       ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_EARLY_SOFTMAX)) {
                ctx->num_additional_fused_ops = topk_moe_early_softmax.size() - 1;
                // view of argsort writes to memory
                ctx->fused_ops_write_mask |= 1 << 3;
            } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_late_softmax, { i + 1, i + 5 }) &&
                       ggml_check_edges(cgraph, i, topk_moe_late_softmax_edges) &&
                       ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_LATE_SOFTMAX)) {
                ctx->num_additional_fused_ops = topk_moe_late_softmax.size() - 1;
                // view of argsort writes to memory
                ctx->fused_ops_write_mask |= 1 << 1;
            }
        }
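
        // The last op in a fused group always writes its output; earlier ops are
        // only flagged above when they also write (e.g. the argsort view in the
        // topk_moe patterns).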
        ctx->fused_ops_write_mask |= 1 << ctx->num_additional_fused_ops;

        // Signal the almost_ready fence when the graph is mostly complete (< 20% remaining)
        bool almost_ready = (cgraph->n_nodes - i) < cgraph->n_nodes / 5;
        bool submit = (submitted_nodes >= nodes_per_submit) ||
                      (mul_mat_bytes >= mul_mat_bytes_per_submit) ||
                      (i + ctx->num_additional_fused_ops >= last_node) ||
                      (almost_ready && !ctx->almost_ready_fence_pending);

        bool enqueued = ggml_vk_build_graph(ctx, cgraph, i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i + ctx->num_additional_fused_ops >= last_node, almost_ready, submit);

        if (vk_perf_logger_enabled) {
            if (ctx->compute_ctx.expired()) {
                compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
                ctx->compute_ctx = compute_ctx;
                ggml_vk_ctx_begin(ctx->device, compute_ctx);
            } else {
                compute_ctx = ctx->compute_ctx.lock();
            }
            // If there are fused ops, just write out timestamps for all nodes to keep the accounting simple
            for (int j = 0; j < ctx->num_additional_fused_ops + 1; ++j) {
                compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, i+j+1);
            }
        }

        if (enqueued) {
            ++submitted_nodes;

#ifndef GGML_VULKAN_CHECK_RESULTS
            if (first_node_in_batch) {
                first_node_in_batch = false;
            }
#endif
        }

        if (submit && enqueued) {
            first_node_in_batch = true;
            submitted_nodes = 0;
            mul_mat_bytes = 0;
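
            // Double the matmul threshold for the first few submits: small early
            // batches get the GPU working sooner, larger later ones amortize
            // submission overhead.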
            if (submit_count < 3) {
                mul_mat_bytes_per_submit *= 2;
            }
            submit_count++;
        }
        i += ctx->num_additional_fused_ops;
        ctx->num_additional_fused_ops = 0;
        ctx->fused_ops_write_mask = 0;
    }

    if (vk_perf_logger_enabled) {
        // End the command buffer and submit/wait
        GGML_ASSERT(!ctx->compute_ctx.expired());
        compute_ctx = ctx->compute_ctx.lock();
        ggml_vk_ctx_end(compute_ctx);

        ggml_vk_submit(compute_ctx, ctx->device->fence);
        VK_CHECK(ctx->device->device.waitForFences({ ctx->device->fence }, true, UINT64_MAX), "GGML_VULKAN_PERF waitForFences");
        ctx->device->device.resetFences({ ctx->device->fence });

        // Get the results and pass them to the logger
        std::vector<uint64_t> timestamps(cgraph->n_nodes + 1);
        VK_CHECK(ctx->device->device.getQueryPoolResults(ctx->device->query_pool, 0, cgraph->n_nodes + 1, (cgraph->n_nodes + 1)*sizeof(uint64_t), timestamps.data(), sizeof(uint64_t), vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait), "get timestamp results");
        for (int i = 0; i < cgraph->n_nodes; i++) {
            if (!ggml_vk_is_empty(cgraph->nodes[i])) {
                ctx->device->perf_logger->log_timing(cgraph->nodes[i], uint64_t((timestamps[i+1] - timestamps[i]) * ctx->device->properties.limits.timestampPeriod));
            }
        }
        ctx->device->perf_logger->print_timings();
    }

    ggml_vk_graph_cleanup(ctx);

    return GGML_STATUS_SUCCESS;

    UNUSED(backend);
}

// Sort the graph for improved parallelism.
static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * graph)
{
    VK_LOG_DEBUG("ggml_vk_graph_optimize(" << graph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    if (ctx->device->disable_graph_optimize) {
        return;
    }

    auto const &is_empty = [](ggml_tensor * node) -> bool {
        return node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
    };

    auto const &is_src_of = [](const ggml_tensor *dst, const ggml_tensor *src) -> bool {
        for (uint32_t s = 0; s < GGML_MAX_SRC; ++s) {
            if (dst->src[s] == src) {
                return true;
            }
        }
        // implicit dependency if they view the same tensor
        const ggml_tensor *dst2 = dst->view_src ? dst->view_src : dst;
        const ggml_tensor *src2 = src->view_src ? src->view_src : src;
        if (dst2 == src2) {
            return true;
        }
        return false;
    };

    // This function tries to reorder the graph to allow nodes to run in parallel.
    // This helps with small batches, but for large batches it's a slowdown, probably
    // due to cache contention. So only reorder if the majority of nodes have few rows.
    int num_small_nodes = 0;
    int num_counted_nodes = 0;
    for (int i = 0; i < graph->n_nodes; ++i) {
        if (!is_empty(graph->nodes[i]) &&
            graph->nodes[i]->op != GGML_OP_SET_ROWS) {
            if (ggml_nrows(graph->nodes[i]) <= 8) {
                num_small_nodes++;
            }
            num_counted_nodes++;
        }
    }
    if (num_small_nodes < num_counted_nodes / 2) {
        return;
    }

    std::vector<ggml_tensor *> new_order;
    std::vector<bool> used(graph->n_nodes, false);
    int first_unused = 0;
    while (first_unused < graph->n_nodes) {
        std::vector<int> current_set;

        // Check for fusion patterns and avoid reordering them
        auto const &match_pattern = [&](const std::initializer_list<ggml_op> &pattern, int start) -> bool {
            if (start + (int)pattern.size() <= graph->n_nodes) {
                bool is_pattern = true;
                for (size_t j = 0; j < pattern.size(); ++j) {
                    if (graph->nodes[start + j]->op != pattern.begin()[j] || used[start + j]) {
                        is_pattern = false;
                    }
                }
                return is_pattern;
            }
            return false;
        };

        auto const &keep_pattern = [&](const std::initializer_list<ggml_op> &pattern) -> bool {
            if (match_pattern(pattern, first_unused)) {
                for (size_t j = 0; j < pattern.size(); ++j) {
                    new_order.push_back(graph->nodes[first_unused + j]);
                    used[first_unused + j] = true;
                }
                while (first_unused < graph->n_nodes && used[first_unused]) {
                    first_unused++;
                }
                return true;
            }
            return false;
        };

        if (keep_pattern(topk_moe_early_softmax_norm)) {
            continue;
        }
        if (keep_pattern(topk_moe_early_softmax)) {
            continue;
        }
        if (keep_pattern(topk_moe_late_softmax)) {
            continue;
        }
  11374. // First, grab the next unused node.
  11375. current_set.push_back(first_unused);
  11376. // Loop through the next N nodes. Grab any that don't depend on other nodes that
  11377. // haven't already been run. Nodes that have already been run have used[i] set
  11378. // to true. Allow nodes that depend on the previous node if it's a fusion pattern
  11379. // that we support (e.g. RMS_NORM + MUL).
  11380. // This first pass only grabs "real" (non-view nodes). Second pass grabs view nodes.
  11381. // The goal is to not interleave real and view nodes in a way that breaks fusion.
  11382. const int NUM_TO_CHECK = 20;
        for (int j = first_unused+1; j < std::min(first_unused + NUM_TO_CHECK, graph->n_nodes); ++j) {
            if (used[j]) {
                continue;
            }
            if (is_empty(graph->nodes[j])) {
                continue;
            }
            // Don't pull forward nodes from fusion patterns
            if (match_pattern(topk_moe_early_softmax_norm, j) ||
                match_pattern(topk_moe_early_softmax, j) ||
                match_pattern(topk_moe_late_softmax, j)) {
                continue;
            }
            bool ok = true;
            for (int c = first_unused; c < j; ++c) {
                if (!used[c] &&
                    is_src_of(graph->nodes[j], graph->nodes[c]) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_RMS_NORM && graph->nodes[j]->op == GGML_OP_MUL) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT && graph->nodes[j]->op == GGML_OP_ADD) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT_ID && graph->nodes[j]->op == GGML_OP_ADD_ID)) {
                    ok = false;
                    break;
                }
            }
            if (ok) {
                current_set.push_back(j);
                // Look for ROPE + VIEW + SET_ROWS and make them consecutive
                if (graph->nodes[j]->op == GGML_OP_ROPE) {
                    int view_idx = -1;
                    int set_rows_idx = -1;
                    for (int k = j+1; k < std::min(j + 10, graph->n_nodes); ++k) {
                        if (view_idx == -1 &&
                            graph->nodes[k]->op == GGML_OP_VIEW &&
                            graph->nodes[k]->src[0] == graph->nodes[j]) {
                            view_idx = k;
                            continue;
                        }
                        if (view_idx != -1 &&
                            set_rows_idx == -1 &&
                            graph->nodes[k]->op == GGML_OP_SET_ROWS &&
                            graph->nodes[k]->src[0] == graph->nodes[view_idx]) {
                            set_rows_idx = k;
                            break;
                        }
                    }
                    if (set_rows_idx != -1) {
                        current_set.push_back(view_idx);
                        current_set.push_back(set_rows_idx);
                        used[view_idx] = true;
                        used[set_rows_idx] = true;
                    }
                }
            }
        }
        // Second pass grabs view nodes.
        // Skip this if it would break a fusion optimization (don't split up add->rms_norm or add->add).
        if (graph->nodes[current_set.back()]->op != GGML_OP_ADD) {
            for (int j = first_unused+1; j < std::min(first_unused + NUM_TO_CHECK, graph->n_nodes); ++j) {
                if (used[j]) {
                    continue;
                }
                if (!is_empty(graph->nodes[j])) {
                    continue;
                }
                bool ok = true;
                for (int c = first_unused; c < j; ++c) {
                    bool c_in_current_set = std::find(current_set.begin(), current_set.end(), c) != current_set.end();
                    // skip views whose srcs haven't been processed.
                    if (!used[c] &&
                        is_src_of(graph->nodes[j], graph->nodes[c]) &&
                        !c_in_current_set) {
                        ok = false;
                        break;
                    }
                }
                if (ok) {
                    current_set.push_back(j);
                }
            }
        }

        // Push the current set into new_order
        for (auto c : current_set) {
            new_order.push_back(graph->nodes[c]);
            used[c] = true;
        }
        while (first_unused < graph->n_nodes && used[first_unused]) {
            first_unused++;
        }
    }

    // Replace the graph with the new order.
    for (int i = 0; i < graph->n_nodes; ++i) {
        graph->nodes[i] = new_order[i];
    }
}
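
// Illustrative effect of the reorder above (a sketch, not part of the
// implementation): for nodes [A, B, C] where B reads A's output and C is
// independent of both, the first pass forms the set {A, C} and emits
// [A, C, B], so the independent nodes A and C end up adjacent and can be
// dispatched together, while B still runs after A.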

// TODO: enable async and synchronize
static ggml_backend_i ggml_backend_vk_interface = {
    /* .get_name           = */ ggml_backend_vk_name,
    /* .free               = */ ggml_backend_vk_free,
    /* .set_tensor_async   = */ NULL,  // ggml_backend_vk_set_tensor_async,
    /* .get_tensor_async   = */ NULL,  // ggml_backend_vk_get_tensor_async,
    /* .cpy_tensor_async   = */ NULL,  // ggml_backend_vk_cpy_tensor_async,
    /* .synchronize        = */ NULL,  // ggml_backend_vk_synchronize,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_vk_graph_compute,
    /* .event_record       = */ NULL,
    /* .event_wait         = */ NULL,
    /* .graph_optimize     = */ ggml_vk_graph_optimize,
};

static ggml_guid_t ggml_backend_vk_guid() {
    static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
    return &guid;
}

ggml_backend_t ggml_backend_vk_init(size_t dev_num) {
    VK_LOG_DEBUG("ggml_backend_vk_init(" << dev_num << ")");

    ggml_backend_vk_context * ctx = new ggml_backend_vk_context;
    ggml_vk_init(ctx, dev_num);

    ggml_backend_t vk_backend = new ggml_backend {
        /* .guid    = */ ggml_backend_vk_guid(),
        /* .iface   = */ ggml_backend_vk_interface,
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), dev_num),
        /* .context = */ ctx,
    };

    return vk_backend;
}
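
// Illustrative usage (a sketch, not part of this file): create a backend for
// device 0, verify it is the Vulkan backend, and release it when done.
//
//   ggml_backend_t backend = ggml_backend_vk_init(0);
//   if (backend != NULL && ggml_backend_is_vk(backend)) {
//       // ... build a ggml_cgraph and run it with ggml_backend_graph_compute ...
//       ggml_backend_free(backend);
//   }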

bool ggml_backend_is_vk(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
}

int ggml_backend_vk_get_device_count() {
    return ggml_vk_get_device_count();
}

void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());
    int dev_idx = vk_instance.device_indices[device];
    ggml_vk_get_device_description(dev_idx, description, description_size);
}

void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());
    GGML_ASSERT(device < (int) vk_instance.device_supports_membudget.size());

    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
    vk::PhysicalDeviceMemoryBudgetPropertiesEXT budgetprops;
    vk::PhysicalDeviceMemoryProperties2 memprops = {};
    bool membudget_supported = vk_instance.device_supports_membudget[device];

    if (membudget_supported) {
        memprops.pNext = &budgetprops;
    }
    vkdev.getMemoryProperties2(&memprops);

    for (uint32_t i = 0; i < memprops.memoryProperties.memoryHeapCount; ++i) {
        const vk::MemoryHeap & heap = memprops.memoryProperties.memoryHeaps[i];
        if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
            *total = heap.size;
            if (membudget_supported && i < budgetprops.heapUsage.size()) {
                *free = budgetprops.heapBudget[i] - budgetprops.heapUsage[i];
            } else {
                *free = heap.size;
            }
            break;
        }
    }
}
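
// Note on the memory query above: when VK_EXT_memory_budget is available,
// "free" is reported as heapBudget - heapUsage for the first device-local
// heap; without the extension there is no usage information, so the full
// heap size is returned as "free".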

static vk::PhysicalDeviceType ggml_backend_vk_get_device_type(int device_idx) {
    GGML_ASSERT(device_idx >= 0 && device_idx < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice device = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device_idx]];

    vk::PhysicalDeviceProperties2 props = {};
    device.getProperties2(&props);

    return props.properties.deviceType;
}

static std::string ggml_backend_vk_get_device_pci_id(int device_idx) {
    GGML_ASSERT(device_idx >= 0 && device_idx < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice device = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device_idx]];

    const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

    bool ext_support = false;
    for (const auto& properties : ext_props) {
        if (strcmp("VK_EXT_pci_bus_info", properties.extensionName) == 0) {
            ext_support = true;
            break;
        }
    }
    if (!ext_support) {
        return "";
    }

    vk::PhysicalDeviceProperties2 props = {};
    vk::PhysicalDevicePCIBusInfoPropertiesEXT pci_bus_info = {};
    props.pNext = &pci_bus_info;
    device.getProperties2(&props);

    const uint32_t pci_domain = pci_bus_info.pciDomain;
    const uint32_t pci_bus = pci_bus_info.pciBus;
    const uint32_t pci_device = pci_bus_info.pciDevice;
    const uint8_t pci_function = (uint8_t) pci_bus_info.pciFunction; // pci function is between 0 and 7, prevent printf overflow warning

    char pci_bus_id[16] = {};
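    // Formats an extended BDF string, e.g. "0000:03:00.0" (domain:bus:device.function).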
    snprintf(pci_bus_id, sizeof(pci_bus_id), "%04x:%02x:%02x.%x", pci_domain, pci_bus, pci_device, pci_function);

    return std::string(pci_bus_id);
}

//////////////////////////

struct ggml_backend_vk_device_context {
    size_t device;
    std::string name;
    std::string description;
    bool is_integrated_gpu;
    std::string pci_bus_id;
};

static const char * ggml_backend_vk_device_get_name(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->name.c_str();
}

static const char * ggml_backend_vk_device_get_description(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->description.c_str();
}

static void ggml_backend_vk_device_get_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)device->context;
    ggml_backend_vk_get_device_memory(ctx->device, free, total);
}

static ggml_backend_buffer_type_t ggml_backend_vk_device_get_buffer_type(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ggml_backend_vk_buffer_type(ctx->device);
}

static ggml_backend_buffer_type_t ggml_backend_vk_device_get_host_buffer_type(ggml_backend_dev_t dev) {
    UNUSED(dev);
    return ggml_backend_vk_host_buffer_type();
}

static enum ggml_backend_dev_type ggml_backend_vk_device_get_type(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->is_integrated_gpu ? GGML_BACKEND_DEVICE_TYPE_IGPU : GGML_BACKEND_DEVICE_TYPE_GPU;
}

static void ggml_backend_vk_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    props->name        = ggml_backend_vk_device_get_name(dev);
    props->description = ggml_backend_vk_device_get_description(dev);
    props->type        = ggml_backend_vk_device_get_type(dev);
    props->device_id   = ctx->pci_bus_id.empty() ? nullptr : ctx->pci_bus_id.c_str();
    ggml_backend_vk_device_get_memory(dev, &props->memory_free, &props->memory_total);
    props->caps = {
        /* .async                = */ false,
        /* .host_buffer          = */ true,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ false,
    };
}

static ggml_backend_t ggml_backend_vk_device_init(ggml_backend_dev_t dev, const char * params) {
    UNUSED(params);
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ggml_backend_vk_init(ctx->device);
}

static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_EXP:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_ERF:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_SIGMOID:
                case GGML_UNARY_OP_HARDSIGMOID:
                case GGML_UNARY_OP_HARDSWISH:
                    return ggml_is_contiguous(op->src[0]) &&
                           (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                           (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
                           (op->src[0]->type == op->type);
                default:
                    return false;
            }
        case GGML_OP_GLU:
            switch (ggml_get_glu_op(op)) {
                case GGML_GLU_OP_GEGLU:
                case GGML_GLU_OP_REGLU:
                case GGML_GLU_OP_SWIGLU:
                case GGML_GLU_OP_SWIGLU_OAI:
                case GGML_GLU_OP_GEGLU_ERF:
                case GGML_GLU_OP_GEGLU_QUICK:
                    return ggml_is_contiguous(op->src[0]) &&
                           (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                           (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
                           (op->src[0]->type == op->type);
                default:
                    return false;
            }
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);
                if (op->op == GGML_OP_MUL_MAT_ID) {
                    if (!device->mul_mat_id_s[src0_type] && !device->mul_mat_id_m[src0_type] && !device->mul_mat_id_l[src0_type]) {
                        // If there's not enough shared memory for row_ids and the result tile, fall back to CPU
                        return false;
                    }
                }
                switch (src0_type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_Q2_K:
                    case GGML_TYPE_Q3_K:
                    case GGML_TYPE_Q4_K:
                    case GGML_TYPE_Q5_K:
                    case GGML_TYPE_Q6_K:
                    case GGML_TYPE_IQ1_S:
                    case GGML_TYPE_IQ1_M:
                    case GGML_TYPE_IQ2_XXS:
                    case GGML_TYPE_IQ2_XS:
                    case GGML_TYPE_IQ2_S:
                    case GGML_TYPE_IQ3_XXS:
                    case GGML_TYPE_IQ3_S:
                    case GGML_TYPE_IQ4_XS:
                    case GGML_TYPE_IQ4_NL:
                    case GGML_TYPE_MXFP4:
                        break;
                    default:
                        return false;
                }
                struct ggml_tensor * a;
                struct ggml_tensor * b;
                if (op->op == GGML_OP_MUL_MAT) {
                    a = op->src[0];
                    b = op->src[1];
                } else {
                    a = op->src[2];
                    b = op->src[1];
                }
                if (a->ne[3] != b->ne[3]) {
                    return false;
                }
                if (!(ggml_vk_dim01_contiguous(op->src[0]) || op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_BF16) ||
                    !(ggml_vk_dim01_contiguous(op->src[1]) || op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F16)) {
                    return false;
                }
                if (op->src[0]->type == GGML_TYPE_BF16 && op->src[1]->type == GGML_TYPE_F16) {
                    // We currently don't have a bf16 x f16 shader, or an fp16->bf16 copy shader.
                    // So don't support this combination for now.
                    return false;
                }

                return true;
            }
        case GGML_OP_FLASH_ATTN_EXT:
            {
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                auto device = ggml_vk_get_device(ctx->device);
                bool coopmat2 = device->coopmat2;
                uint32_t HSK = op->src[1]->ne[0];
                uint32_t HSV = op->src[2]->ne[0];
                if ((HSK % 8) != 0 || (HSV % 8) != 0) {
                    return false;
                }
                if (op->src[4] && op->src[4]->type != GGML_TYPE_F32) {
                    return false;
                }
                if (op->src[0]->type != GGML_TYPE_F32) {
                    return false;
                }
                if (op->type != GGML_TYPE_F32) {
                    return false;
                }
                if (op->src[3] && op->src[3]->type != GGML_TYPE_F16) {
                    return false;
                }
                // It's straightforward to support different K/V dequant, but would
                // significantly increase the number of pipelines
                if (op->src[1]->type != op->src[2]->type) {
                    return false;
                }
                switch (op->src[1]->type) {
                    case GGML_TYPE_F16:
                    case GGML_TYPE_F32:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q8_0:
                        // supported in scalar and coopmat2 paths
                        break;
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
                    //case GGML_TYPE_Q2_K:
                    //case GGML_TYPE_Q3_K:
                    //case GGML_TYPE_Q4_K:
                    //case GGML_TYPE_Q5_K:
                    //case GGML_TYPE_Q6_K:
                    //case GGML_TYPE_IQ1_S:
                    //case GGML_TYPE_IQ1_M:
                    //case GGML_TYPE_IQ2_XXS:
                    //case GGML_TYPE_IQ2_XS:
                    //case GGML_TYPE_IQ2_S:
                    //case GGML_TYPE_IQ3_XXS:
                    //case GGML_TYPE_IQ3_S:
                    //case GGML_TYPE_IQ4_XS:
                    case GGML_TYPE_IQ4_NL:
                        // currently supported only in coopmat2 path
                        if (!coopmat2) {
                            return false;
                        }
                        break;
                    default:
                        return false;
                }
                if (!coopmat2 && !device->subgroup_shuffle) {
                    // scalar FA uses subgroupShuffle
                    return false;
                }
                return true;
            }
        case GGML_OP_GET_ROWS:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_Q2_K:
                    case GGML_TYPE_Q3_K:
                    case GGML_TYPE_Q4_K:
                    case GGML_TYPE_Q5_K:
                    case GGML_TYPE_Q6_K:
                    case GGML_TYPE_IQ1_S:
                    case GGML_TYPE_IQ1_M:
                    case GGML_TYPE_IQ2_XXS:
                    case GGML_TYPE_IQ2_XS:
                    case GGML_TYPE_IQ2_S:
                    case GGML_TYPE_IQ3_XXS:
                    case GGML_TYPE_IQ3_S:
                    case GGML_TYPE_IQ4_XS:
                    case GGML_TYPE_IQ4_NL:
                    case GGML_TYPE_MXFP4:
                        return true;
                    default:
                        return false;
                }
            }
        case GGML_OP_SET_ROWS:
            {
                switch (op->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_IQ4_NL:
                        return true;
                    default:
                        return false;
                }
            }
        case GGML_OP_CONT:
        case GGML_OP_CPY:
        case GGML_OP_DUP:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;

                if (src0_type == GGML_TYPE_F32) {
                    switch (src1_type) {
                        case GGML_TYPE_F32:
                        case GGML_TYPE_F16:
                        case GGML_TYPE_BF16:
                        case GGML_TYPE_Q4_0:
                        case GGML_TYPE_Q4_1:
                        case GGML_TYPE_Q5_0:
                        case GGML_TYPE_Q5_1:
                        case GGML_TYPE_Q8_0:
                        case GGML_TYPE_IQ4_NL:
                            return true;
                        default:
                            break;
                    }
                }
                if (src1_type == GGML_TYPE_F32) {
                    switch (src0_type) {
                        case GGML_TYPE_F16:
                        case GGML_TYPE_Q4_0:
                        case GGML_TYPE_Q4_1:
                        case GGML_TYPE_Q5_0:
                        case GGML_TYPE_Q5_1:
                        case GGML_TYPE_Q8_0:
                        case GGML_TYPE_IQ4_NL:
                            return true;
                        default:
                            break;
                    }
                }
                if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                if (
                    (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_I32) ||
                    (src0_type == GGML_TYPE_I32 && src1_type == GGML_TYPE_F32)
                ) {
                    return true;
                }
                // We can handle copying from a type to the same type if it's
                // contiguous (memcpy). We use f16 or f32 shaders to do the copy,
                // so the type/block size must be a multiple of 2.
                if (src0_type == src1_type &&
                    ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op) &&
                    (ggml_type_size(src0_type) % 2) == 0) {
                    return true;
                }
                return false;
            }
        case GGML_OP_REPEAT:
            return ggml_type_size(op->type) == sizeof(float) && ggml_type_size(op->src[0]->type) == sizeof(float);
        case GGML_OP_REPEAT_BACK:
            return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_RMS_NORM:
            return true;
        case GGML_OP_NORM:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_L2_NORM:
            return ggml_is_contiguous(op->src[0]);
        case GGML_OP_ADD:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
            return (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                   (op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F16) &&
                   (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16);
        case GGML_OP_ADD_ID:
            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->src[2]->type == GGML_TYPE_I32 &&
                   op->type == GGML_TYPE_F32;
        case GGML_OP_SILU_BACK:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_SIN:
        case GGML_OP_COS:
        case GGML_OP_CLAMP:
        case GGML_OP_LEAKY_RELU:
        case GGML_OP_OPT_STEP_ADAMW:
        case GGML_OP_OPT_STEP_SGD:
            return op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_ARGSORT:
            return op->ne[0] <= max_argsort_cols;
        case GGML_OP_UPSCALE:
        case GGML_OP_ACC:
        case GGML_OP_CONCAT:
        case GGML_OP_SCALE:
        case GGML_OP_PAD:
        case GGML_OP_ROLL:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SOFT_MAX_BACK:
            return true;
        case GGML_OP_SUM:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_MEAN:
            return op->src[0]->type == GGML_TYPE_F32 && ggml_is_contiguous_rows(op->src[0]);
        case GGML_OP_ARGMAX:
        case GGML_OP_COUNT_EQUAL:
        case GGML_OP_IM2COL:
        case GGML_OP_IM2COL_3D:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_CONV_2D_DW:
        case GGML_OP_POOL_2D:
        case GGML_OP_RWKV_WKV6:
        case GGML_OP_RWKV_WKV7:
            return true;
        case GGML_OP_SSM_SCAN:
            {
                for (int i = 0; i < 6; i++) {
                    if (op->src[i] && ggml_is_quantized(op->src[i]->type)) {
                        return false;
                    }
                }
                if (op->src[6] && op->src[6]->type != GGML_TYPE_I32) {
                    return false;
                }
                if (op->src[0]->type != GGML_TYPE_F32 || op->type != GGML_TYPE_F32) {
                    return false;
                }

                const uint32_t d_state = op->src[0]->ne[0];
                const uint32_t head_dim = op->src[0]->ne[1];

                bool is_mamba2 = (op->src[3] && op->src[3]->nb[1] == sizeof(float));
                if (!is_mamba2) {
                    return false;
                }

                if ((d_state != 128 && d_state != 256) || head_dim % 16 != 0) {
                    return false;
                }

                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);

                const uint32_t SPLIT_H = 16;
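
                // The scan shader stages a SPLIT_H x d_state fp32 state tile
                // in shared memory, so reject devices that cannot hold it.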
                size_t stateC_size = SPLIT_H * d_state * sizeof(float);

                if (stateC_size > device->properties.limits.maxComputeSharedMemorySize) {
                    return false;
                }

                return true;
            }
        case GGML_OP_SSM_CONV:
            return true;
        case GGML_OP_CONV_TRANSPOSE_1D:
            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
        case GGML_OP_CONV_2D:
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                // Op is disabled for Apple because it segfaults at pipeline create time on MoltenVK
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);
                if (op->op == GGML_OP_CONV_TRANSPOSE_2D &&
                    device->properties.limits.maxPushConstantsSize < sizeof(vk_op_conv_transpose_2d_push_constants)) {
                    return false;
                }
                // Channel-contiguous format is not supported yet.
                return ((op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                        op->src[1]->type == GGML_TYPE_F32 &&
                        op->type == GGML_TYPE_F32 &&
                        ggml_is_contiguous(op->src[0]) &&
                        ggml_is_contiguous(op->src[1]) &&
                        ggml_is_contiguous(op));
            }
        default:
            return false;
    }

    UNUSED(dev);
}

static bool ggml_backend_vk_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    if (buft->iface.get_name != ggml_backend_vk_buffer_type_name) {
        return false;
    }

    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return buft_ctx->device->idx == ctx->device;
}

static bool ggml_backend_vk_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    const int min_batch_size = 32;

    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
           (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);

    UNUSED(dev);
}
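
// Offload heuristic: the host<->device transfer only pays off for large
// batches. ne[1] is the batch dimension for most ops considered here, while
// MUL_MAT_ID is batched along ne[2].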

static const struct ggml_backend_device_i ggml_backend_vk_device_i = {
    /* .get_name             = */ ggml_backend_vk_device_get_name,
    /* .get_description      = */ ggml_backend_vk_device_get_description,
    /* .get_memory           = */ ggml_backend_vk_device_get_memory,
    /* .get_type             = */ ggml_backend_vk_device_get_type,
    /* .get_props            = */ ggml_backend_vk_device_get_props,
    /* .init_backend         = */ ggml_backend_vk_device_init,
    /* .get_buffer_type      = */ ggml_backend_vk_device_get_buffer_type,
    /* .get_host_buffer_type = */ ggml_backend_vk_device_get_host_buffer_type,
    /* .buffer_from_host_ptr = */ NULL,
    /* .supports_op          = */ ggml_backend_vk_device_supports_op,
    /* .supports_buft        = */ ggml_backend_vk_device_supports_buft,
    /* .offload_op           = */ ggml_backend_vk_device_offload_op,
    /* .event_new            = */ NULL,
    /* .event_free           = */ NULL,
    /* .event_synchronize    = */ NULL,
};

static const char * ggml_backend_vk_reg_get_name(ggml_backend_reg_t reg) {
    UNUSED(reg);
    return GGML_VK_NAME;
}

static size_t ggml_backend_vk_reg_get_device_count(ggml_backend_reg_t reg) {
    UNUSED(reg);
    return ggml_backend_vk_get_device_count();
}
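
// Device objects are created lazily on first use, guarded by a mutex, and
// cached for the lifetime of the process.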
static ggml_backend_dev_t ggml_backend_vk_reg_get_device(ggml_backend_reg_t reg, size_t device) {
    static std::vector<ggml_backend_dev_t> devices;

    static bool initialized = false;

    {
        static std::mutex mutex;
        std::lock_guard<std::mutex> lock(mutex);
        if (!initialized) {
            for (int i = 0; i < ggml_backend_vk_get_device_count(); i++) {
                ggml_backend_vk_device_context * ctx = new ggml_backend_vk_device_context;
                char desc[256];
                ggml_backend_vk_get_device_description(i, desc, sizeof(desc));
                ctx->device = i;
                ctx->name = GGML_VK_NAME + std::to_string(i);
                ctx->description = desc;
                ctx->is_integrated_gpu = ggml_backend_vk_get_device_type(i) == vk::PhysicalDeviceType::eIntegratedGpu;
                ctx->pci_bus_id = ggml_backend_vk_get_device_pci_id(i);
                devices.push_back(new ggml_backend_device {
                    /* .iface   = */ ggml_backend_vk_device_i,
                    /* .reg     = */ reg,
                    /* .context = */ ctx,
                });
            }
            initialized = true;
        }
    }

    GGML_ASSERT(device < devices.size());
    return devices[device];
}

static const struct ggml_backend_reg_i ggml_backend_vk_reg_i = {
    /* .get_name         = */ ggml_backend_vk_reg_get_name,
    /* .get_device_count = */ ggml_backend_vk_reg_get_device_count,
    /* .get_device       = */ ggml_backend_vk_reg_get_device,
    /* .get_proc_address = */ NULL,
};

ggml_backend_reg_t ggml_backend_vk_reg() {
    static ggml_backend_reg reg = {
        /* .api_version = */ GGML_BACKEND_API_VERSION,
        /* .iface       = */ ggml_backend_vk_reg_i,
        /* .context     = */ nullptr,
    };
    try {
        ggml_vk_instance_init();
        return &reg;
    } catch (const vk::SystemError& e) {
        VK_LOG_DEBUG("ggml_backend_vk_reg() -> Error: System error: " << e.what());
        return nullptr;
    } catch (const std::exception &e) {
        VK_LOG_DEBUG("ggml_backend_vk_reg() -> Error: " << e.what());
        return nullptr;
    } catch (...) {
        VK_LOG_DEBUG("ggml_backend_vk_reg() -> Error: unknown exception during Vulkan init");
        return nullptr;
    }
}
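
// Illustrative use of the registration entry point (a sketch, not part of
// this file): enumerate the Vulkan devices exposed by the registry. Note
// that ggml_backend_vk_reg() returns nullptr if Vulkan init fails.
//
//   ggml_backend_reg_t reg = ggml_backend_vk_reg();
//   if (reg != nullptr) {
//       size_t n = ggml_backend_reg_dev_count(reg);
//       for (size_t i = 0; i < n; i++) {
//           ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
//           // ... query properties or init a backend from dev ...
//       }
//   }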

// Extension availability
static bool ggml_vk_instance_validation_ext_available() {
#ifdef GGML_VULKAN_VALIDATE
    // Check if validation layer provides the extension
    const std::string layer_name = "VK_LAYER_KHRONOS_validation";
    for (const auto& layer : vk::enumerateInstanceLayerProperties()) {
        if (layer_name == layer.layerName.data()) {
            for (const auto& ext : vk::enumerateInstanceExtensionProperties(layer_name)) {
                if (strcmp("VK_EXT_validation_features", ext.extensionName.data()) == 0) {
                    return true;
                }
            }
        }
    }
    std::cerr << "ggml_vulkan: WARNING: Validation layer or layer extension VK_EXT_validation_features not found." << std::endl;
#endif
    return false;
}

static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef __APPLE__
    // Check for portability enumeration extension for MoltenVK support
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
            return true;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

static bool ggml_vk_instance_debug_utils_ext_available(
    const std::vector<vk::ExtensionProperties> & instance_extensions) {
    // Check for the debug utils extension
    for (const auto & properties : instance_extensions) {
        if (strcmp("VK_EXT_debug_utils", properties.extensionName) == 0) {
            return true;
        }
    }

    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_EXT_debug_utils not found." << std::endl;
    return false;

    UNUSED(instance_extensions);
}

static bool ggml_vk_device_is_supported(const vk::PhysicalDevice & vkdev) {
    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    vkGetPhysicalDeviceFeatures2(vkdev, &device_features2);

    return vk11_features.storageBuffer16BitAccess;
}

static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch) {
    switch (props.vendorID) {
        case VK_VENDOR_ID_INTEL:
            // Only allowing Xe2 GPU at the moment since Xe2 GPU can gain significant performance boost,
            // while some older hardware (ex. Arc A770) has performance regressions
            return arch == vk_device_architecture::INTEL_XE2;
        case VK_VENDOR_ID_AMD:
            if (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource) {
                // Workaround for AMD proprietary driver reporting support on all GPUs
                return arch == vk_device_architecture::AMD_RDNA3;
            }
            return true;
        default:
            return true;
    }
}

// checks

#ifdef GGML_VULKAN_CHECK_RESULTS
static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
    if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
        return;
    }
    for (int j = 0; j < level; j++) {
        std::cerr << " ";
    }
    std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << std::endl;

    done.push_back(tensor);

    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (tensor->src[i] != nullptr) {
            ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
        }
    }
}
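
// Print a 10x10 window of values around (i0, i1) in the (i2, i3) plane.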
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16 && tensor->type != GGML_TYPE_I32) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else if (tensor->type == GGML_TYPE_I32) {
                    val = *(const int32_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}

static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name) {
    void * tensor_data = tensor->data;

    const bool is_gpu = tensor->buffer != nullptr && ggml_backend_buffer_is_vk(tensor->buffer);
    if (is_gpu) {
        const size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

        vk_buffer buffer_gpu = buf_ctx->dev_buffer;
        ggml_vk_buffer_read(buffer_gpu, vk_tensor_offset(tensor) + tensor->view_offs, tensor_data, tensor_size);
    }

    std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
    std::cerr << "tensor=" << tensor << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
    if (tensor->src[0] != nullptr) {
        std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
    }
    if (tensor->src[1] != nullptr) {
        std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
    }
    std::cerr << std::endl << "Result:" << std::endl;
    ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
    std::cerr << std::endl;
    std::vector<const ggml_tensor *> done;
    ggml_vk_print_graph_origin(tensor, done);

    if (is_gpu) {
        free(tensor_data);
    }
}

void * comp_result;
size_t comp_size;
size_t comp_nb[GGML_MAX_DIMS];
size_t check_counter = 0;
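
// Run the node (and any fused follower) on the CPU backend and stash the
// reference result in comp_result/comp_nb, so that ggml_vk_check_results_1
// can compare it against the GPU output after the Vulkan graph executes.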
  12265. static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx) {
  12266. ggml_tensor * tensor = cgraph->nodes[tensor_idx];
  12267. if (tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_SET_ROWS) {
  12268. return;
  12269. }
  12270. bool fused_rms_norm_mul = false;
  12271. int rms_norm_idx = -1;
  12272. if (ctx->num_additional_fused_ops == 1 &&
  12273. tensor->op == GGML_OP_RMS_NORM &&
  12274. cgraph->nodes[tensor_idx + 1]->op == GGML_OP_MUL) {
  12275. fused_rms_norm_mul = true;
  12276. tensor = cgraph->nodes[tensor_idx + 1];
  12277. }
  12278. check_counter++;
  12279. if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
  12280. return;
  12281. }
  12282. VK_LOG_DEBUG("ggml_vk_check_results_0(" << tensor->name << ")");
  12283. ggml_tensor * src0 = tensor->src[0];
  12284. ggml_tensor * src1 = tensor->src[1];
  12285. struct ggml_init_params iparams = {
  12286. /*.mem_size =*/ 2ul*1024ul*1024ul*1024ul,
  12287. /*.mem_buffer =*/ NULL,
  12288. /*.no_alloc =*/ false,
  12289. };
  12290. struct ggml_context * ggml_ctx = ggml_init(iparams);
  12291. std::array<struct ggml_tensor *, GGML_MAX_SRC> src_clone = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
  12292. std::array<size_t, GGML_MAX_SRC> src_size = {};
  12293. std::array<void *, GGML_MAX_SRC> src_buffer = {};
  12294. const char * srci_name[GGML_MAX_SRC] = {"src0", "src1", "src2", "src3", "src4", "src5", "src6", "src7", "src8", "src9"};
  12295. struct ggml_tensor * tensor_clone = nullptr;
  12296. for (int i = 0; i < GGML_MAX_SRC; i++) {
  12297. ggml_tensor * srci = tensor->src[i];
  12298. if (fused_rms_norm_mul) {
  12299. rms_norm_idx = tensor->src[0]->op == GGML_OP_RMS_NORM ? 0 : 1;
  12300. ggml_tensor *rms_norm = tensor->src[rms_norm_idx];
  12301. switch (i) {
  12302. case 0: srci = rms_norm->src[0]; break;
  12303. case 1: srci = tensor->src[1 - rms_norm_idx]; break;
  12304. default: continue;
  12305. }
  12306. }
  12307. if (srci == nullptr) {
  12308. continue;
  12309. }
  12310. ggml_tensor * srci_clone = ggml_dup_tensor(ggml_ctx, srci);
  12311. size_t srci_size = ggml_nbytes(srci);
  12312. src_clone[i] = srci_clone;
  12313. src_size[i] = ggml_nbytes(srci);
  12314. src_buffer[i] = malloc(srci_size);
  12315. srci_clone->data = src_buffer[i];
  12316. if (ggml_backend_buffer_is_host(srci->buffer)) {
  12317. memcpy(srci_clone->data, srci->data, srci_size);
  12318. memcpy(srci_clone->nb, srci->nb, sizeof(size_t) * GGML_MAX_DIMS);
  12319. } else if (ggml_backend_buffer_is_vk(srci->buffer)) {
  12320. ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)srci->buffer->context;
  12321. vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
  12322. uint64_t offset = vk_tensor_offset(srci) + srci->view_offs;
  12323. if (!ggml_is_contiguous(srci) && ggml_vk_dim01_contiguous(srci)) {
  12324. for (int i3 = 0; i3 < srci->ne[3]; i3++) {
  12325. for (int i2 = 0; i2 < srci->ne[2]; i2++) {
  12326. const int idx = i3*srci->ne[2] + i2;
  12327. ggml_vk_buffer_read(buffer_gpu, offset + idx * srci->nb[2], ((char *)srci_clone->data + idx * srci_clone->nb[2]), srci->ne[1] * srci->nb[1]);
  12328. }
  12329. }
  12330. srci_clone->nb[0] = srci->nb[0];
  12331. srci_clone->nb[1] = srci->nb[1];
  12332. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  12333. srci_clone->nb[i] = srci_clone->nb[i - 1]*srci_clone->ne[i - 1];
  12334. }
  12335. } else {
  12336. if (offset + srci_size >= buffer_gpu->size) {
  12337. srci_size = buffer_gpu->size - offset;
  12338. }
  12339. ggml_vk_buffer_read(buffer_gpu, offset, srci_clone->data, srci_size);
  12340. memcpy(srci_clone->nb, srci->nb, sizeof(size_t) * GGML_MAX_DIMS);
  12341. }
  12342. } else {
  12343. GGML_ABORT("fatal error");
  12344. }
  12345. if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
  12346. ggml_vk_print_tensor(srci, srci_name[i]);
  12347. }
  12348. }
  12349. if (tensor->op == GGML_OP_FLASH_ATTN_EXT) {
  12350. const float * params = (const float *)tensor->op_params;
  12351. tensor_clone = ggml_flash_attn_ext(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3], params[0], params[1], params[2]);
  12352. if (src_clone[4]) {
  12353. ggml_flash_attn_ext_add_sinks(tensor_clone, src_clone[4]);
  12354. }
  12355. } else if (tensor->op == GGML_OP_MUL_MAT) {
  12356. tensor_clone = ggml_mul_mat(ggml_ctx, src_clone[0], src_clone[1]);
  12357. } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
  12358. tensor_clone = ggml_mul_mat_id(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]);
  12359. } else if (tensor->op == GGML_OP_SUB) {
  12360. tensor_clone = ggml_sub(ggml_ctx, src_clone[0], src_clone[1]);
  12361. } else if (tensor->op == GGML_OP_MUL) {
  12362. if (fused_rms_norm_mul) {
  12363. tensor_clone = ggml_rms_norm(ggml_ctx, src_clone[0], *(float *)tensor->src[rms_norm_idx]->op_params);
  12364. tensor_clone = ggml_mul(ggml_ctx, tensor_clone, src_clone[1 - rms_norm_idx]);
  12365. } else {
  12366. tensor_clone = ggml_mul(ggml_ctx, src_clone[0], src_clone[1]);
  12367. }
  12368. } else if (tensor->op == GGML_OP_DIV) {
  12369. tensor_clone = ggml_div(ggml_ctx, src_clone[0], src_clone[1]);
  12370. } else if (tensor->op == GGML_OP_CONCAT) {
  12371. tensor_clone = ggml_concat(ggml_ctx, src_clone[0], src_clone[1], *(int *)tensor->op_params);
  12372. } else if (tensor->op == GGML_OP_UPSCALE) {
  12373. tensor_clone = ggml_interpolate(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], (ggml_scale_mode) tensor->op_params[0]);
  12374. } else if (tensor->op == GGML_OP_SCALE) {
  12375. const float * params = (const float *)tensor->op_params;
  12376. tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
  12377. } else if (tensor->op == GGML_OP_SQR) {
  12378. tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
  12379. } else if (tensor->op == GGML_OP_SQRT) {
  12380. tensor_clone = ggml_sqrt(ggml_ctx, src_clone[0]);
  12381. } else if (tensor->op == GGML_OP_SIN) {
  12382. tensor_clone = ggml_sin(ggml_ctx, src_clone[0]);
  12383. } else if (tensor->op == GGML_OP_COS) {
  12384. tensor_clone = ggml_cos(ggml_ctx, src_clone[0]);
  12385. } else if (tensor->op == GGML_OP_CLAMP) {
  12386. const float * params = (const float *)tensor->op_params;
  12387. tensor_clone = ggml_clamp(ggml_ctx, src_clone[0], params[0], params[1]);
  12388. } else if (tensor->op == GGML_OP_PAD) {
  12389. tensor_clone = ggml_pad_ext(ggml_ctx, src_clone[0], tensor->op_params[0], tensor->op_params[1], tensor->op_params[2], tensor->op_params[3],
  12390. tensor->op_params[4], tensor->op_params[5], tensor->op_params[6], tensor->op_params[7]);
  12391. } else if (tensor->op == GGML_OP_REPEAT) {
  12392. tensor_clone = ggml_repeat(ggml_ctx, src_clone[0], tensor);
  12393. } else if (tensor->op == GGML_OP_REPEAT_BACK) {
  12394. tensor_clone = ggml_repeat_back(ggml_ctx, src_clone[0], tensor);
  12395. } else if (tensor->op == GGML_OP_ADD) {
  12396. tensor_clone = ggml_add(ggml_ctx, src_clone[0], src_clone[1]);
  12397. } else if (tensor->op == GGML_OP_ACC) {
  12398. tensor_clone = ggml_acc(ggml_ctx, src_clone[0], src_clone[1], tensor->op_params[0], tensor->op_params[1], tensor->op_params[2], tensor->op_params[3]);
  12399. } else if (tensor->op == GGML_OP_NORM) {
  12400. tensor_clone = ggml_norm(ggml_ctx, src_clone[0], *(float *)tensor->op_params);
  12401. } else if (tensor->op == GGML_OP_GROUP_NORM) {
  12402. const float * float_params = (const float *)tensor->op_params;
  12403. tensor_clone = ggml_group_norm(ggml_ctx, src_clone[0], tensor->op_params[0], float_params[1]);
  12404. } else if (tensor->op == GGML_OP_RMS_NORM) {
  12405. tensor_clone = ggml_rms_norm(ggml_ctx, src_clone[0], *(float *)tensor->op_params);
  12406. } else if (tensor->op == GGML_OP_RMS_NORM_BACK) {
  12407. const float eps = ((float *) tensor->op_params)[0];
  12408. tensor_clone = ggml_rms_norm_back(ggml_ctx, src_clone[0], src_clone[1], eps);
  12409. } else if (tensor->op == GGML_OP_SILU_BACK) {
  12410. tensor_clone = ggml_silu_back(ggml_ctx, src_clone[0], src_clone[1]);
  12411. } else if (tensor->op == GGML_OP_L2_NORM) {
  12412. const float eps = ((float *) tensor->op_params)[0];
  12413. tensor_clone = ggml_l2_norm(ggml_ctx, src_clone[0], eps);
  12414. } else if (tensor->op == GGML_OP_SOFT_MAX) {
  12415. if (src1 != nullptr) {
  12416. const float * params = (const float *)tensor->op_params;
  12417. tensor_clone = ggml_soft_max_ext(ggml_ctx, src_clone[0], src_clone[1], params[0], params[1]);
  12418. } else {
  12419. tensor_clone = ggml_soft_max(ggml_ctx, src_clone[0]);
  12420. }
  12421. } else if (tensor->op == GGML_OP_SOFT_MAX_BACK) {
  12422. tensor_clone = ggml_soft_max_ext_back(ggml_ctx, src_clone[0], src_clone[1], ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
  12423. } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
  12424. tensor_clone = ggml_diag_mask_inf(ggml_ctx, src_clone[0], tensor->op_params[0]);
  12425. } else if (tensor->op == GGML_OP_ROPE || tensor->op == GGML_OP_ROPE_BACK) {
  12426. const int n_dims = ((int32_t *) tensor->op_params)[1];
  12427. const int mode = ((int32_t *) tensor->op_params)[2];
  12428. //const int n_ctx_ggml = ((int32_t *) tensor->op_params)[3];
  12429. const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
  12430. const float freq_base = ((float *) tensor->op_params)[5];
  12431. const float freq_scale = ((float *) tensor->op_params)[6];
  12432. const float ext_factor = ((float *) tensor->op_params)[7];
  12433. const float attn_factor = ((float *) tensor->op_params)[8];
  12434. const float beta_fast = ((float *) tensor->op_params)[9];
  12435. const float beta_slow = ((float *) tensor->op_params)[10];
  12436. if (mode & GGML_ROPE_TYPE_MROPE) {
  12437. int32_t *sections = ((int32_t *) tensor->op_params) + 11;
  12438. if (tensor->op == GGML_OP_ROPE) {
  12439. tensor_clone = ggml_rope_multi(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, sections, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
  12440. } else {
  12441. tensor_clone = ggml_rope_multi_back(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, sections, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
  12442. }
  12443. } else {
  12444. if (tensor->op == GGML_OP_ROPE) {
  12445. tensor_clone = ggml_rope_ext(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
  12446. } else {
  12447. tensor_clone = ggml_rope_ext_back(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
  12448. }
  12449. }
  12450. } else if (tensor->op == GGML_OP_UNARY) {
  12451. switch (ggml_get_unary_op(tensor)) {
  12452. case GGML_UNARY_OP_EXP:
  12453. tensor_clone = ggml_exp(ggml_ctx, src_clone[0]);
  12454. break;
  12455. case GGML_UNARY_OP_SILU:
  12456. tensor_clone = ggml_silu(ggml_ctx, src_clone[0]);
  12457. break;
  12458. case GGML_UNARY_OP_GELU:
  12459. tensor_clone = ggml_gelu(ggml_ctx, src_clone[0]);
  12460. break;
  12461. case GGML_UNARY_OP_GELU_ERF:
  12462. tensor_clone = ggml_gelu_erf(ggml_ctx, src_clone[0]);
  12463. break;
  12464. case GGML_UNARY_OP_GELU_QUICK:
  12465. tensor_clone = ggml_gelu_quick(ggml_ctx, src_clone[0]);
  12466. break;
  12467. case GGML_UNARY_OP_RELU:
  12468. tensor_clone = ggml_relu(ggml_ctx, src_clone[0]);
  12469. break;
  12470. case GGML_UNARY_OP_TANH:
  12471. tensor_clone = ggml_tanh(ggml_ctx, src_clone[0]);
  12472. break;
  12473. case GGML_UNARY_OP_SIGMOID:
  12474. tensor_clone = ggml_sigmoid(ggml_ctx, src_clone[0]);
  12475. break;
  12476. case GGML_UNARY_OP_HARDSIGMOID:
  12477. tensor_clone = ggml_hardsigmoid(ggml_ctx, src_clone[0]);
  12478. break;
  12479. case GGML_UNARY_OP_HARDSWISH:
  12480. tensor_clone = ggml_hardswish(ggml_ctx, src_clone[0]);
  12481. break;
  12482. default:
  12483. std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
  12484. GGML_ABORT("fatal error");
  12485. }
  12486. } else if (tensor->op == GGML_OP_GLU) {
  12487. if (src_clone[1] == nullptr) {
  12488. tensor_clone = ggml_glu(ggml_ctx, src_clone[0], (ggml_glu_op) tensor->op_params[0], tensor->op_params[1]);
  12489. } else {
  12490. tensor_clone = ggml_glu_split(ggml_ctx, src_clone[0], src_clone[1], (ggml_glu_op) tensor->op_params[0]);
  12491. }
  12492. ggml_set_op_params_i32(tensor_clone, 2, ggml_get_op_params_i32(tensor, 2));
  12493. ggml_set_op_params_i32(tensor_clone, 3, ggml_get_op_params_i32(tensor, 3));
  12494. } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
  12495. if (src1 == nullptr) {
  12496. tensor_clone = ggml_dup(ggml_ctx, src_clone[0]);
  12497. tensor_clone->type = tensor->type;
  12498. } else {
  12499. tensor_clone = ggml_cpy(ggml_ctx, src_clone[0], src_clone[1]);
  12500. }
  12501. } else if (tensor->op == GGML_OP_CONT) {
  12502. tensor_clone = ggml_cont_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
  12503. } else if (tensor->op == GGML_OP_RESHAPE) {
  12504. tensor_clone = ggml_reshape_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
  12505. } else if (tensor->op == GGML_OP_VIEW) {
  12506. tensor_clone = ggml_view_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
  12507. } else if (tensor->op == GGML_OP_PERMUTE) {
  12508. int32_t * params = (int32_t *)tensor->op_params;
  12509. tensor_clone = ggml_permute(ggml_ctx, src_clone[0], params[0], params[1], params[2], params[3]);
  12510. } else if (tensor->op == GGML_OP_TRANSPOSE) {
  12511. tensor_clone = ggml_transpose(ggml_ctx, src_clone[0]);
  12512. } else if (tensor->op == GGML_OP_GET_ROWS) {
  12513. tensor_clone = ggml_get_rows(ggml_ctx, src_clone[0], src_clone[1]);
  12514. } else if (tensor->op == GGML_OP_ARGSORT) {
  12515. tensor_clone = ggml_argsort(ggml_ctx, src_clone[0], (ggml_sort_order) *(int *)tensor->op_params);
  12516. } else if (tensor->op == GGML_OP_SUM) {
  12517. tensor_clone = ggml_sum(ggml_ctx, src_clone[0]);
  12518. } else if (tensor->op == GGML_OP_SUM_ROWS) {
  12519. tensor_clone = ggml_sum_rows(ggml_ctx, src_clone[0]);
  12520. } else if (tensor->op == GGML_OP_MEAN) {
  12521. tensor_clone = ggml_mean(ggml_ctx, src_clone[0]);
  12522. } else if (tensor->op == GGML_OP_ARGMAX) {
  12523. tensor_clone = ggml_argmax(ggml_ctx, src_clone[0]);
  12524. } else if (tensor->op == GGML_OP_COUNT_EQUAL) {
  12525. tensor_clone = ggml_count_equal(ggml_ctx, src_clone[0], src_clone[1]);
  12526. } else if (tensor->op == GGML_OP_IM2COL) {
  12527. const int32_t s0 = tensor->op_params[0];
  12528. const int32_t s1 = tensor->op_params[1];
  12529. const int32_t p0 = tensor->op_params[2];
  12530. const int32_t p1 = tensor->op_params[3];
  12531. const int32_t d0 = tensor->op_params[4];
  12532. const int32_t d1 = tensor->op_params[5];
  12533. const bool is_2D = tensor->op_params[6] == 1;
  12534. tensor_clone = ggml_im2col(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1, is_2D, tensor->type);
  12535. } else if (tensor->op == GGML_OP_IM2COL_3D) {
  12536. const int32_t s0 = tensor->op_params[0];
  12537. const int32_t s1 = tensor->op_params[1];
  12538. const int32_t s2 = tensor->op_params[2];
  12539. const int32_t p0 = tensor->op_params[3];
  12540. const int32_t p1 = tensor->op_params[4];
  12541. const int32_t p2 = tensor->op_params[5];
  12542. const int32_t d0 = tensor->op_params[6];
  12543. const int32_t d1 = tensor->op_params[7];
  12544. const int32_t d2 = tensor->op_params[8];
  12545. const int32_t IC = tensor->op_params[9];
  12546. tensor_clone = ggml_im2col_3d(ggml_ctx, src_clone[0], src_clone[1], IC, s0, s1, s2, p0, p1, p2, d0, d1, d2, tensor->type);
  12547. } else if (tensor->op == GGML_OP_TIMESTEP_EMBEDDING) {
  12548. const int32_t dim = tensor->op_params[0];
  12549. const int32_t max_period = tensor->op_params[1];
  12550. tensor_clone = ggml_timestep_embedding(ggml_ctx, src_clone[0], dim, max_period);
  12551. } else if (tensor->op == GGML_OP_CONV_TRANSPOSE_1D){
  12552. const int32_t s0 = tensor->op_params[0];
  12553. const int32_t p0 = tensor->op_params[1];
  12554. const int32_t d0 = tensor->op_params[2];
  12555. tensor_clone = ggml_conv_transpose_1d(ggml_ctx, src_clone[0], src_clone[1], s0, p0, d0);
  12556. } else if (tensor->op == GGML_OP_POOL_2D) {
        enum ggml_op_pool op = static_cast<ggml_op_pool>(tensor->op_params[0]);
        const int32_t k0 = tensor->op_params[1];
        const int32_t k1 = tensor->op_params[2];
        const int32_t s0 = tensor->op_params[3];
        const int32_t s1 = tensor->op_params[4];
        const int32_t p0 = tensor->op_params[5];
        const int32_t p1 = tensor->op_params[6];
        tensor_clone = ggml_pool_2d(ggml_ctx, src_clone[0], op, k0, k1, s0, s1, p0, p1);
    } else if (tensor->op == GGML_OP_CONV_2D) {
        const int32_t s0 = tensor->op_params[0];
        const int32_t s1 = tensor->op_params[1];
        const int32_t p0 = tensor->op_params[2];
        const int32_t p1 = tensor->op_params[3];
        const int32_t d0 = tensor->op_params[4];
        const int32_t d1 = tensor->op_params[5];
        tensor_clone = ggml_conv_2d(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1);
    } else if (tensor->op == GGML_OP_CONV_TRANSPOSE_2D) {
        const int32_t s = tensor->op_params[0];
        tensor_clone = ggml_conv_transpose_2d_p0(ggml_ctx, src_clone[0], src_clone[1], s);
    } else if (tensor->op == GGML_OP_LEAKY_RELU) {
        const float * op_params = (const float *)tensor->op_params;
        tensor_clone = ggml_leaky_relu(ggml_ctx, src_clone[0], op_params[0], false);
    } else if (tensor->op == GGML_OP_RWKV_WKV6) {
        tensor_clone = ggml_rwkv_wkv6(ggml_ctx, src_clone[0], src_clone[1],
                                      src_clone[2], src_clone[3], src_clone[4], src_clone[5]);
    } else if (tensor->op == GGML_OP_RWKV_WKV7) {
        tensor_clone = ggml_rwkv_wkv7(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3],
                                      src_clone[4], src_clone[5], src_clone[6]);
    } else if (tensor->op == GGML_OP_OPT_STEP_ADAMW) {
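        // The optimizer step ops require src0 to be flagged as a trainable parameter,
        // so carry the original tensor's flags over to the clone.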
        src_clone[0]->flags = src0->flags;
        tensor_clone = ggml_opt_step_adamw(ggml_ctx, src_clone[0], src_clone[1],
                                           src_clone[2], src_clone[3], src_clone[4]);
    } else if (tensor->op == GGML_OP_OPT_STEP_SGD) {
        src_clone[0]->flags = src0->flags;
        tensor_clone = ggml_opt_step_sgd(ggml_ctx, src_clone[0], src_clone[1],
                                         src_clone[2]);
    } else if (tensor->op == GGML_OP_ADD_ID) {
        tensor_clone = ggml_add_id(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]);
    } else if (tensor->op == GGML_OP_SSM_SCAN) {
        tensor_clone = ggml_ssm_scan(ggml_ctx, src_clone[0], src_clone[1], src_clone[2],
                                     src_clone[3], src_clone[4], src_clone[5], src_clone[6]);
    } else if (tensor->op == GGML_OP_SSM_CONV) {
        tensor_clone = ggml_ssm_conv(ggml_ctx, src_clone[0], src_clone[1]);
    } else {
        std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
        GGML_ABORT("fatal error");
    }
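
    // Evaluate the cloned op on the CPU backend to produce the reference result,
    // then stash its data and strides for the comparison in ggml_vk_check_results_1.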
    ggml_cgraph * cgraph_cpu = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph_cpu, tensor_clone);
    ggml_graph_compute_with_ctx(ggml_ctx, cgraph_cpu, 8);

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        ggml_vk_print_tensor(tensor_clone, "tensor_clone");
    }

    comp_size = ggml_nbytes(tensor_clone);
    comp_result = malloc(comp_size);
    memcpy(comp_result, tensor_clone->data, comp_size);
    memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);

    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (src_buffer[i] != nullptr) {
            free(src_buffer[i]);
        }
    }

    ggml_free(ggml_ctx);

    VK_LOG_DEBUG("END ggml_vk_check_results_0(" << tensor->name << ")");
}
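
// Reads back the Vulkan backend's output for a graph node and compares it element-wise
// against the CPU reference computed in ggml_vk_check_results_0, aborting on NaN/Inf
// mismatches or a large average relative error.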
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx) {
    ggml_tensor * tensor = cgraph->nodes[tensor_idx];
    if (tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_SET_ROWS) {
        return;
    }
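    // If RMS_NORM was fused with the following MUL, only the MUL node's output
    // exists on the GPU, so compare against that node instead.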
    if (ctx->num_additional_fused_ops == 1 &&
        tensor->op == GGML_OP_RMS_NORM &&
        cgraph->nodes[tensor_idx + 1]->op == GGML_OP_MUL) {
        tensor = cgraph->nodes[tensor_idx + 1];
    }
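    // Honor the skip counter unless this specific tensor was requested for output.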
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_1(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];
    ggml_tensor * src3 = tensor->src[3];

    void * tensor_data = tensor->data;
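    // If the tensor lives in a Vulkan buffer, read it back into host memory first,
    // clamping the read size to the end of the device buffer.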
    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

        vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
        uint64_t offset = vk_tensor_offset(tensor) + tensor->view_offs;
        if (offset + tensor_size >= buffer_gpu->size) {
            tensor_size = buffer_gpu->size - offset;
        }

        ggml_vk_buffer_read(buffer_gpu, offset, tensor_data, tensor_size);
    }

    float first_error_result = -1.0f;
    float first_error_correct = -1.0f;
    std::array<int, 4> first_error = { -1, -1, -1, -1 };
    double avg_err = 0.0;
    size_t counter = 0;
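    // Walk every element of the output tensor, comparing the backend result against
    // the CPU reference and accumulating the relative error.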
    for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
        for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
            for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
                for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
                    const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
                    float correct = 0.0f;
                    float result = 0.0f;

                    if (buffer_size_fit) {
                        if (tensor->type == GGML_TYPE_F32) {
                            correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else if (tensor->type == GGML_TYPE_F16) {
                            correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                            result = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                        } else if (tensor->type == GGML_TYPE_BF16) {
                            correct = ggml_bf16_to_fp32(*(ggml_bf16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                            result = ggml_bf16_to_fp32(*(ggml_bf16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                        } else if (tensor->type == GGML_TYPE_I32) {
                            correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else if (tensor->type == GGML_TYPE_I64) {
                            correct = *(int64_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result = *(int64_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else {
                            std::cerr << "Results check not implemented for type " << ggml_type_name(tensor->type) << std::endl;
                        }
                    } else {
                        std::cerr << "ERROR: element offset out of bounds of the CPU reference buffer, type " << ggml_type_name(tensor->type) << std::endl;
                        GGML_ABORT("fatal error");
                    }
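                    // A NaN/Inf mismatch between backend and reference is a hard failure:
                    // dump diagnostics for the tensor and its sources, then abort.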
                    if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
                        std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
                        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
                        if (src0 != nullptr) {
                            std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
                        }
                        if (src1 != nullptr) {
                            std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
                        }
                        if (src2 != nullptr) {
                            std::cerr << "src2=" << src2 << " src2->name=" << src2->name << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
                        }
                        if (src3 != nullptr) {
                            std::cerr << "src3=" << src3 << " src3->name=" << src3->name << " op=" << ggml_op_name(src3->op) << " type=" << ggml_type_name(src3->type) << " ne0=" << src3->ne[0] << " nb0=" << src3->nb[0] << " ne1=" << src3->ne[1] << " nb1=" << src3->nb[1] << " ne2=" << src3->ne[2] << " nb2=" << src3->nb[2] << " ne3=" << src3->ne[3] << " nb3=" << src3->nb[3] << " offset=" << src3->view_offs << std::endl;
                        }
                        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
                        std::cerr << std::endl << "Result:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
                        std::cerr << std::endl << "Correct:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
                        std::cerr << std::endl;
                        std::vector<const ggml_tensor *> done;
                        ggml_vk_print_graph_origin(tensor, done);
                        GGML_ABORT("fatal error");
                    }
                    // Relative error with the denominator clamped to at least 1.0,
                    // so tiny reference values do not blow up the metric.
                    const double denom = std::fabs(correct) > 1.0f ? std::fabs(correct) : 1.0f;
                    if (first_error[0] == -1 && std::fabs(correct - result) / denom > 0.5) {
                        first_error[0] = i0;
                        first_error[1] = i1;
                        first_error[2] = i2;
                        first_error[3] = i3;
                        first_error_result = result;
                        first_error_correct = correct;
                    }
                    // Skip non-finite values when accumulating the average error: an Inf or NaN
                    // would poison avg_err, and a NaN result that matches a NaN reference counts as correct.
                    if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
                        avg_err += std::fabs(correct - result) / denom;
                    }
                    counter++;
                }
            }
        }
    }
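    // Mean relative error over all finite elements that were compared.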
    avg_err /= counter;

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        if (src3 != nullptr) {
            std::cerr << "src3=" << src3 << " op=" << ggml_op_name(src3->op) << " type=" << ggml_type_name(src3->type) << " ne0=" << src3->ne[0] << " nb0=" << src3->nb[0] << " ne1=" << src3->ne[1] << " nb1=" << src3->nb[1] << " ne2=" << src3->ne[2] << " nb2=" << src3->nb[2] << " ne3=" << src3->ne[3] << " nb3=" << src3->nb[3] << " offset=" << src3->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
    }
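    // An average relative error above 0.5 (or a NaN average) is treated as a mismatch.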
    if (avg_err > 0.5 || std::isnan(avg_err)) {
        std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        if (src3 != nullptr) {
            std::cerr << "src3=" << src3 << " op=" << ggml_op_name(src3->op) << " type=" << ggml_type_name(src3->type) << " ne0=" << src3->ne[0] << " nb0=" << src3->nb[0] << " ne1=" << src3->ne[1] << " nb1=" << src3->nb[1] << " ne2=" << src3->ne[2] << " nb2=" << src3->nb[2] << " ne3=" << src3->ne[3] << " nb3=" << src3->nb[3] << " offset=" << src3->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
        GGML_ABORT("fatal error");
    } else {
        std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " avg_err=" << avg_err << std::endl;
    }
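    // Release the CPU reference buffer and, if one was allocated, the host copy of the GPU result.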
    free(comp_result);
    comp_result = nullptr;
    comp_size = 0;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        free(tensor_data);
    }

    VK_LOG_DEBUG("END ggml_vk_check_results_1(" << tensor->name << ")");
}
#endif
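
// Exports the backend registration entry point for builds where ggml backends are
// loaded as dynamic modules.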
GGML_BACKEND_DL_IMPL(ggml_backend_vk_reg)