ggml-vulkan.cpp (750 KB, 14,308 lines)

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
5020
#include "ggml-vulkan.h"
#include <vulkan/vulkan_core.h>
#if defined(GGML_VULKAN_RUN_TESTS) || defined(GGML_VULKAN_CHECK_RESULTS)
#include <chrono>
#include "ggml-cpu.h"
#endif

// See https://github.com/KhronosGroup/Vulkan-Hpp?tab=readme-ov-file#extensions--per-device-function-pointers-
#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1
// We use VULKAN_HPP_DEFAULT_DISPATCHER, but not VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE,
// to avoid conflicts with applications or other libraries that might use it.
#if VK_HEADER_VERSION >= 301
namespace vk::detail { class DispatchLoaderDynamic; }
using vk::detail::DispatchLoaderDynamic;
#else
namespace vk { class DispatchLoaderDynamic; }
using vk::DispatchLoaderDynamic;
#endif

DispatchLoaderDynamic & ggml_vk_default_dispatcher();
#define VULKAN_HPP_DEFAULT_DISPATCHER ggml_vk_default_dispatcher()

#include <vulkan/vulkan.hpp>

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <tuple>
#include <vector>
#include <sstream>
#include <utility>
#include <memory>
#include <limits>
#include <map>
#include <set>
#include <unordered_map>
#include <mutex>
#include <future>
#include <thread>

#if defined(_MSC_VER)
# define NOMINMAX 1
# include <windows.h>
# define YIELD() YieldProcessor()
#elif defined(__clang__) || defined(__GNUC__)
# if defined(__x86_64__) || defined(__i386__)
#  include <immintrin.h>
#  define YIELD() _mm_pause()
# elif defined(__arm__) || defined(__aarch64__)
#  if defined(__clang__)
#   include <arm_acle.h>
#   define YIELD() __yield()
#  else
#   define YIELD() asm volatile("yield")
#  endif
# endif
#endif

#if !defined(YIELD)
#define YIELD()
#endif

  58. #include "ggml-impl.h"
  59. #include "ggml-backend-impl.h"
  60. #include "ggml-vulkan-shaders.hpp"
  61. // remove this once it's more widely available in the SDK
  62. #if !defined(VK_KHR_shader_bfloat16)
  63. #define VK_KHR_shader_bfloat16 1
  64. #define VK_KHR_SHADER_BFLOAT16_SPEC_VERSION 1
  65. #define VK_KHR_SHADER_BFLOAT16_EXTENSION_NAME "VK_KHR_shader_bfloat16"
  66. #define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR ((VkStructureType)1000141000)
  67. #define VK_COMPONENT_TYPE_BFLOAT16_KHR ((VkComponentTypeKHR)1000141000)
  68. typedef struct VkPhysicalDeviceShaderBfloat16FeaturesKHR {
  69. VkStructureType sType;
  70. void* pNext;
  71. VkBool32 shaderBFloat16Type;
  72. VkBool32 shaderBFloat16DotProduct;
  73. VkBool32 shaderBFloat16CooperativeMatrix;
  74. } VkPhysicalDeviceShaderBfloat16FeaturesKHR;
  75. #endif
  76. #define ROUNDUP_POW2(M, N) (((M) + (N) - 1) & ~((N) - 1))
  77. #define CEIL_DIV(M, N) (((M) + (N)-1) / (N))
  78. static bool is_pow2(uint32_t x) { return x > 1 && (x & (x-1)) == 0; }
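// Illustrative compile-time checks (not part of the original file) of the
// helpers above: ROUNDUP_POW2 rounds up to the next multiple of a power of
// two and CEIL_DIV is ceiling division. Note that is_pow2 deliberately
// returns false for x <= 1.
static_assert(ROUNDUP_POW2(70, 16) == 80, "70 rounded up to a multiple of 16");
static_assert(CEIL_DIV(70, 16) == 5, "ceil(70 / 16)");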
#define VK_VENDOR_ID_AMD    0x1002
#define VK_VENDOR_ID_APPLE  0x106b
#define VK_VENDOR_ID_INTEL  0x8086
#define VK_VENDOR_ID_NVIDIA 0x10de

#define VK_DEVICE_DESCRIPTOR_POOL_SIZE 256

#define GGML_VK_MAX_NODES 8192

#define VK_CHECK(err, msg)                                          \
    do {                                                            \
        vk::Result err_ = (err);                                    \
        if (err_ != vk::Result::eSuccess) {                         \
            fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n",  \
                #err, to_string(err_).c_str(), __FILE__, __LINE__); \
            exit(1);                                                \
        }                                                           \
    } while (0)
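// Usage sketch (illustrative, not from the original file): VK_CHECK wraps
// vulkan.hpp calls that return a vk::Result and aborts with the source
// location on failure, e.g.:
//
//   VK_CHECK(device.waitForFences(1, &fence, VK_TRUE, UINT64_MAX), "wait_fence");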
#ifdef GGML_VULKAN_DEBUG
#define VK_LOG_DEBUG(msg) std::cerr << msg << std::endl
#else
#define VK_LOG_DEBUG(msg) ((void) 0)
#endif // GGML_VULKAN_DEBUG
struct ggml_backend_vk_context;

#define MAX_PARAMETER_COUNT 12
// Max number of adds that can be fused without exceeding MAX_PARAMETER_COUNT.
#define MAX_FUSED_ADDS (MAX_PARAMETER_COUNT - 3)
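// Illustrative reading of the bound above (an assumption, not from the
// original file): fusing k adds binds k+1 source buffers, one destination,
// and one optional partials buffer for the rms variant, so
// (k + 1) + 1 + 1 <= MAX_PARAMETER_COUNT gives k <= MAX_PARAMETER_COUNT - 3.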
struct vk_pipeline_struct {
    std::string name;
    vk::ShaderModule shader_module;
    vk::PipelineLayout layout;
    vk::Pipeline pipeline;
    uint32_t push_constant_size;
    uint32_t parameter_count;
    std::array<uint32_t, 3> wg_denoms;
    uint32_t align;
    // true if fields have been set by ggml_vk_create_pipeline
    bool initialized {};
    // set to true to request that the pipeline be compiled
    std::atomic<bool> needed {};
    // set to true when the shader has been compiled
    std::atomic<bool> compiled {};
    // number of registers used, extracted from pipeline executable properties
    uint32_t register_count {};
};

typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;

static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);

struct vk_matmul_pipeline_struct {
    vk_pipeline l, m, s;
    vk_pipeline a_l, a_m, a_s;
    // Returns true when all unaligned pipelines are null.
    // We only check the unaligned variants, since at least one of them must
    // exist, while the aligned pipelines are optional.
    bool is_empty() const {
        return l == nullptr && m == nullptr && s == nullptr;
    }
};
typedef std::shared_ptr<vk_matmul_pipeline_struct> vk_matmul_pipeline;

struct vk_matmul_pipeline2 {
    vk_matmul_pipeline2() {
        f16acc = std::make_shared<vk_matmul_pipeline_struct>();
        f32acc = std::make_shared<vk_matmul_pipeline_struct>();
    }
    vk_matmul_pipeline f32acc;
    vk_matmul_pipeline f16acc;
};

struct vk_device_struct;
typedef std::shared_ptr<vk_device_struct> vk_device;
typedef std::weak_ptr<vk_device_struct> vk_device_ref;

struct vk_buffer_struct;
typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;

struct ggml_backend_vk_buffer_type_context {
    std::string name;
    vk_device device;
};

struct vk_queue;

// Stores command pool/buffers. There's an instance of this
// for each (context,queue) pair and for each (device,queue) pair.
struct vk_command_pool {
    void init(vk_device& device, vk_queue *q_);
    void destroy(vk::Device& device);

    vk::CommandPool pool;
    uint32_t cmd_buffer_idx;
    std::vector<vk::CommandBuffer> cmd_buffers;

    vk_queue *q;
};

// Prevent simultaneous submissions to the same queue.
// This could be per vk_queue if we stopped having two vk_queue structures
// sharing the same vk::Queue.
static std::mutex queue_mutex;

struct vk_queue {
    uint32_t queue_family_index;
    vk::Queue queue;

    vk_command_pool cmd_pool;

    vk::PipelineStageFlags stage_flags;

    bool transfer_only;

    // copy everything except the cmd_pool
    void copyFrom(vk_queue &other) {
        queue_family_index = other.queue_family_index;
        queue = other.queue;
        stage_flags = other.stage_flags;
        transfer_only = other.transfer_only;
    }
};

static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft);
static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft);
static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft);
static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor);

static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_vk_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_vk_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_vk_buffer_type_get_alignment,
    /* .get_max_size   = */ ggml_backend_vk_buffer_type_get_max_size,
    /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
class vk_memory_logger;
#endif
class vk_perf_logger;
static void ggml_vk_destroy_buffer(vk_buffer& buf);
static void ggml_vk_synchronize(ggml_backend_vk_context * ctx);

static constexpr uint32_t mul_mat_vec_max_cols = 8;
static constexpr uint32_t p021_max_gqa_ratio = 8;

enum vk_device_architecture {
    OTHER,
    AMD_GCN,
    AMD_RDNA1,
    AMD_RDNA2,
    AMD_RDNA3,
    INTEL_XE2,
    NVIDIA_PRE_TURING,
};
static vk_device_architecture get_device_architecture(const vk::PhysicalDevice& device) {
    vk::PhysicalDeviceProperties props = device.getProperties();

    if (props.vendorID == VK_VENDOR_ID_AMD) {
        const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

        bool amd_shader_core_properties = false;
        bool integer_dot_product = false;
        bool subgroup_size_control = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_AMD_shader_core_properties", properties.extensionName) == 0) {
                amd_shader_core_properties = true;
            } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0) {
                integer_dot_product = true;
            } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
                subgroup_size_control = true;
            }
        }

        if (!amd_shader_core_properties || !integer_dot_product || !subgroup_size_control) {
            return vk_device_architecture::OTHER;
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceShaderCorePropertiesAMD shader_core_props_amd;
        vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR integer_dot_props;
        vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;

        props2.pNext = &shader_core_props_amd;
        shader_core_props_amd.pNext = &integer_dot_props;
        integer_dot_props.pNext = &subgroup_size_control_props;

        device.getProperties2(&props2);

        if (subgroup_size_control_props.maxSubgroupSize == 64 && subgroup_size_control_props.minSubgroupSize == 64) {
            return vk_device_architecture::AMD_GCN;
        }
        if (subgroup_size_control_props.maxSubgroupSize == 64 && subgroup_size_control_props.minSubgroupSize == 32) {
            // RDNA
            if (shader_core_props_amd.wavefrontsPerSimd == 20) {
                return vk_device_architecture::AMD_RDNA1;
            }
            if (integer_dot_props.integerDotProduct4x8BitPackedMixedSignednessAccelerated) {
                return vk_device_architecture::AMD_RDNA3;
            }
            return vk_device_architecture::AMD_RDNA2;
        }
    } else if (props.vendorID == VK_VENDOR_ID_INTEL) {
        const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

        bool subgroup_size_control = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
                subgroup_size_control = true;
            }
        }

        if (!subgroup_size_control) {
            return vk_device_architecture::OTHER;
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;

        props2.pNext = &subgroup_size_control_props;
        device.getProperties2(&props2);
        if (subgroup_size_control_props.minSubgroupSize == 16) {
            // The Xe2 architecture uses SIMD16, while previous Xe and Gen architectures use SIMD8.
            // The minimum subgroup size matches the SIMD width, so we distinguish the architecture by checking this value.
            // https://www.intel.com/content/www/us/en/content-details/824434/2024-intel-tech-tour-xe2-and-lunar-lake-s-gpu.html
            // https://www.intel.com/content/www/us/en/docs/oneapi/optimization-guide-gpu/2025-0/intel-xe-gpu-architecture.html
            return vk_device_architecture::INTEL_XE2;
        }
    } else if (props.vendorID == VK_VENDOR_ID_NVIDIA) {
        const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

        bool cooperative_matrix = false;

        // Detect "pre-turing" based on lack of coopmat support.
        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0) {
                cooperative_matrix = true;
                break;
            }
        }

        if (!cooperative_matrix) {
            return vk_device_architecture::NVIDIA_PRE_TURING;
        }
    }
    return vk_device_architecture::OTHER;
}
enum vk_conv_shapes {
    CONV_SHAPE_128x128,
    CONV_SHAPE_64x32,
    CONV_SHAPE_32x256,
    CONV_SHAPE_COUNT,
};

uint32_t conv_shapes_wg_denoms[][3] = {
    { 128, 128, 1 },
    {  64,  32, 1 },
    {  32, 256, 1 },
};

enum dmmv_wg_sizes {
    DMMV_WG_SIZE_SUBGROUP,
    DMMV_WG_SIZE_LARGE,
    DMMV_WG_SIZE_COUNT,
};

enum FaCodePath {
    FA_SCALAR,
    FA_COOPMAT1,
    FA_COOPMAT2,
};
struct vk_fa_pipeline_state {
    vk_fa_pipeline_state(uint32_t HSK, uint32_t HSV, bool small_rows, FaCodePath path, bool aligned, bool f32acc)
        : HSK(HSK), HSV(HSV), small_rows(small_rows), path(path), aligned(aligned), f32acc(f32acc) {}

    uint32_t HSK, HSV;
    bool small_rows;
    FaCodePath path;
    bool aligned;
    bool f32acc;

    bool operator<(const vk_fa_pipeline_state &b) const {
        return std::tie(HSK, HSV, small_rows, path, aligned, f32acc) <
               std::tie(b.HSK, b.HSV, b.small_rows, b.path, b.aligned, b.f32acc);
    }
};

struct vk_conv2d_pipeline_state {
    vk_conv2d_pipeline_state(uint32_t s0, uint32_t s1, uint32_t p0, uint32_t p1, uint32_t d0, uint32_t d1, uint32_t KW, uint32_t KH)
        : s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), KW(KW), KH(KH) {}

    uint32_t s0, s1, p0, p1, d0, d1, KW, KH;

    bool operator<(const vk_conv2d_pipeline_state &b) const {
        return std::tie(s0, s1, p0, p1, d0, d1, KW, KH) <
               std::tie(b.s0, b.s1, b.p0, b.p1, b.d0, b.d1, b.KW, b.KH);
    }
};

struct vk_solve_tri_pipeline_state {
    vk_solve_tri_pipeline_state(uint32_t N, uint32_t K)
        : N(N), K(K) {}

    uint32_t N, K;

    bool operator<(const vk_solve_tri_pipeline_state &b) const {
        return std::tie(N, K) < std::tie(b.N, b.K);
    }
};

enum shader_reduction_mode {
    SHADER_REDUCTION_MODE_SHMEM,
    SHADER_REDUCTION_MODE_HYBRID,
    SHADER_REDUCTION_MODE_SUBGROUP,
    SHADER_REDUCTION_MODE_COUNT,
};
// argsort pipelines for up to 1<<10 invocations per workgroup
static constexpr uint32_t num_argsort_pipelines = 11;
static constexpr uint32_t num_topk_moe_pipelines = 10;
static constexpr uint32_t num_topk_pipelines = 11;

static constexpr std::initializer_list<ggml_op> topk_moe_early_softmax_norm{ GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT,
                                                                             GGML_OP_VIEW, GGML_OP_GET_ROWS, GGML_OP_RESHAPE,
                                                                             GGML_OP_SUM_ROWS, GGML_OP_CLAMP, GGML_OP_DIV,
                                                                             GGML_OP_RESHAPE };
static constexpr std::initializer_list<ggml_op> topk_moe_early_softmax     { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT,
                                                                             GGML_OP_VIEW, GGML_OP_GET_ROWS };
static constexpr std::initializer_list<ggml_op> topk_moe_late_softmax      { GGML_OP_ARGSORT, GGML_OP_VIEW,
                                                                             GGML_OP_GET_ROWS, GGML_OP_RESHAPE,
                                                                             GGML_OP_SOFT_MAX, GGML_OP_RESHAPE };

//node #978 ( SOFT_MAX): ffn_moe_probs-15 ( 0K) [Vulka ] use=2: ffn_moe_logits-15 ( 0K) [Vulka ]
//node #979 ( RESHAPE): ffn_moe_probs-15 (re ( 0K) [Vulka ] use=1: ffn_moe_probs-15 ( 0K) [Vulka ]
//node #980 ( ARGSORT): ffn_moe_argsort-15 ( 0K) [Vulka ] use=1: ffn_moe_probs-15 ( 0K) [Vulka ]
//node #981 ( VIEW): ffn_moe_topk-15 ( 0K) [Vulka ] use=4: ffn_moe_argsort-15 ( 0K) [Vulka ]
//node #982 ( GET_ROWS): ffn_moe_weights-15 ( 0K) [Vulka ] use=1: ffn_moe_probs-15 (re ( 0K) [Vulka ] ffn_moe_topk-15 ( 0K) [Vulka ]
//node #983 ( RESHAPE): ffn_moe_weights-15 ( ( 0K) [Vulka ] use=2: ffn_moe_weights-15 ( 0K) [Vulka ]
//node #984 ( SUM_ROWS): ffn_moe_weights_sum- ( 0K) [Vulka ] use=1: ffn_moe_weights-15 ( ( 0K) [Vulka ]
//node #985 ( CLAMP): ffn_moe_weights_sum_ ( 0K) [Vulka ] use=1: ffn_moe_weights_sum- ( 0K) [Vulka ]
//node #986 ( DIV): ffn_moe_weights_norm ( 0K) [Vulka ] use=1: ffn_moe_weights-15 ( ( 0K) [Vulka ] ffn_moe_weights_sum_ ( 0K) [Vulka ]
//node #987 ( RESHAPE): ffn_moe_weights_norm ( 0K) [Vulka ] use=1: ffn_moe_weights_norm ( 0K) [Vulka ]
static constexpr std::initializer_list<std::array<int, 3>> topk_moe_early_softmax_norm_edges {
    { 1, 0, 0 }, // reshape->src[0] == softmax
    { 2, 0, 0 }, // argsort->src[0] == softmax
    { 3, 0, 2 }, // view->src[0] == argsort
    { 4, 0, 1 }, // get_rows->src[0] == reshape
    { 4, 1, 3 }, // get_rows->src[1] == view
    { 5, 0, 4 }, // reshape->src[0] == get_rows
    { 6, 0, 5 }, // sum_rows->src[0] == reshape
    { 7, 0, 6 }, // clamp->src[0] == sum_rows
    { 8, 0, 5 }, // div->src[0] == reshape
    { 8, 1, 7 }, // div->src[1] == clamp
    { 9, 0, 8 }, // reshape->src[0] == div
};

// same as early_softmax_norm but ending after the get_rows
static constexpr std::initializer_list<std::array<int, 3>> topk_moe_early_softmax_edges {
    { 1, 0, 0 }, // reshape->src[0] == softmax
    { 2, 0, 0 }, // argsort->src[0] == softmax
    { 3, 0, 2 }, // view->src[0] == argsort
    { 4, 0, 1 }, // get_rows->src[0] == reshape
    { 4, 1, 3 }, // get_rows->src[1] == view
};
//node #652 ( ARGSORT): ffn_moe_argsort-11 ( 0K) [Vulka ] use=1: ffn_moe_probs-11 ( 0K) [Vulka ]
//node #653 ( VIEW): ffn_moe_topk-11 ( 0K) [Vulka ] use=7: ffn_moe_argsort-11 ( 0K) [Vulka ]
//node #654 ( GET_ROWS): ffn_moe_weights-11 ( 0K) [Vulka ] use=1: ffn_moe_probs-11 (re ( 0K) [Vulka ] ffn_moe_topk-11 ( 0K) [Vulka ]
//node #655 ( RESHAPE): ffn_moe_weights-11 ( ( 0K) [Vulka ] use=1: ffn_moe_weights-11 ( 0K) [Vulka ]
//node #656 ( SOFT_MAX): node_656 ( 0K) [Vulka ] use=1: ffn_moe_weights-11 ( ( 0K) [Vulka ]
//node #657 ( RESHAPE): ffn_moe_weights_soft ( 0K) [Vulka ] use=1: node_656 ( 0K) [Vulka ]
static constexpr std::initializer_list<std::array<int, 3>> topk_moe_late_softmax_edges {
    { 1, 0, 0 }, // view->src[0] == argsort
    { 2, 1, 1 }, // get_rows->src[1] == view
    { 3, 0, 2 }, // reshape->src[0] == get_rows
    { 4, 0, 3 }, // soft_max->src[0] == reshape
    { 5, 0, 4 }, // reshape->src[0] == soft_max
};

enum topk_moe_mode {
    TOPK_MOE_EARLY_SOFTMAX,
    TOPK_MOE_EARLY_SOFTMAX_NORM,
    TOPK_MOE_LATE_SOFTMAX,
    TOPK_MOE_COUNT,
};
static topk_moe_mode ggml_vk_num_additional_ops_to_topk_moe_mode(uint32_t num) {
    topk_moe_mode mode = num == topk_moe_early_softmax_norm.size() - 1 ? TOPK_MOE_EARLY_SOFTMAX_NORM :
                         num == topk_moe_early_softmax.size() - 1      ? TOPK_MOE_EARLY_SOFTMAX :
                                                                         TOPK_MOE_LATE_SOFTMAX;
    return mode;
}
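// Worked example (illustrative, not from the original file): the op lists
// above contain 10, 5, and 6 ops respectively, so num (the count of ops
// fused after the first one) maps 9 -> TOPK_MOE_EARLY_SOFTMAX_NORM and
// 4 -> TOPK_MOE_EARLY_SOFTMAX, with everything else (here, 5) falling
// through to TOPK_MOE_LATE_SOFTMAX.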
static constexpr std::initializer_list<std::array<int, 3>> rope_view_set_rows_edges {
    { 1, 0, 0 }, // view->src[0] == rope
    { 2, 0, 1 }, // set_rows->src[0] == view
};

static constexpr std::initializer_list<std::array<int, 3>> rms_norm_mul_rope_view_set_rows_edges {
    { 1, 0, 0 }, // mul->src[0] == rms
    { 2, 0, 1 }, // rope->src[0] == mul
    { 3, 0, 2 }, // view->src[0] == rope
    { 4, 0, 3 }, // set_rows->src[0] == view
};
struct vk_device_struct {
    std::recursive_mutex mutex;

    vk::PhysicalDevice physical_device;
    vk::PhysicalDeviceProperties properties;
    std::string name;
    uint64_t max_memory_allocation_size;
    uint64_t max_buffer_size;
    uint64_t suballocation_block_size;
    bool fp16;
    bool bf16;
    bool pipeline_robustness;
    vk::Device device;
    uint32_t vendor_id;
    vk::DriverId driver_id;
    vk_device_architecture architecture;
    vk_queue compute_queue;
    vk_queue transfer_queue;
    bool single_queue;
    bool support_async;
    uint32_t subgroup_size;
    uint32_t subgroup_size_log2;
    uint32_t shader_core_count;
    bool uma;
    bool prefer_host_memory;
    bool float_controls_rte_fp16;
    bool subgroup_arithmetic;
    bool subgroup_shuffle;
    bool subgroup_ballot;
    bool subgroup_clustered;
    bool subgroup_vote;
    bool multi_add;
    bool shader_int64;
    bool buffer_device_address;
    bool vulkan_memory_model;

    bool add_rms_fusion;
    uint32_t partials_binding_alignment;

    bool integer_dot_product;
    // 0: default, 1: force mmvq, -1: disable mmvq
    int32_t mmvq_mode;

    bool subgroup_size_control;
    uint32_t subgroup_min_size;
    uint32_t subgroup_max_size;
    bool subgroup_require_full_support;

    // floor(log2(maxComputeWorkGroupInvocations))
    uint32_t max_workgroup_size_log2 {};

    bool coopmat_support;
    bool coopmat_acc_f32_support {};
    bool coopmat_acc_f16_support {};
    bool coopmat_bf16_support {};
    bool coopmat_support_16x16x16_f16acc {};
    bool coopmat_support_16x16x16_f32acc {};
    bool coopmat1_fa_support {};
    uint32_t coopmat_m;
    uint32_t coopmat_n;
    uint32_t coopmat_k;

    bool coopmat_int_support;
    uint32_t coopmat_int_m;
    uint32_t coopmat_int_n;
    uint32_t coopmat_int_k;

    bool coopmat2;

    bool pipeline_executable_properties_support {};
    size_t idx;

    bool mul_mat_l[GGML_TYPE_COUNT];
    bool mul_mat_m[GGML_TYPE_COUNT];
    bool mul_mat_s[GGML_TYPE_COUNT];
    bool mul_mat_id_l[GGML_TYPE_COUNT];
    bool mul_mat_id_m[GGML_TYPE_COUNT];
    bool mul_mat_id_s[GGML_TYPE_COUNT];

    vk::DescriptorSetLayout dsl;

    vk_matmul_pipeline pipeline_matmul_f32 {};
    vk_matmul_pipeline pipeline_matmul_f32_f16 {};
    vk_matmul_pipeline pipeline_matmul_bf16 {};
    vk_matmul_pipeline2 pipeline_matmul_f16;
    vk_matmul_pipeline2 pipeline_matmul_f16_f32;

    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT];
    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_COUNT];
    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_COUNT];

    vk_matmul_pipeline pipeline_matmul_id_f32 {};
    vk_matmul_pipeline pipeline_matmul_id_bf16 {};
    vk_matmul_pipeline2 pipeline_matmul_id_f16;
    vk_matmul_pipeline2 pipeline_matmul_id_f16_f32;

    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT];
    vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_COUNT];

    vk_pipeline pipeline_matmul_split_k_reduce;
    vk_pipeline pipeline_quantize_q8_1_x4;

    vk_pipeline pipeline_dequant[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[DMMV_WG_SIZE_COUNT][GGML_TYPE_COUNT][mul_mat_vec_max_cols];
    vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[DMMV_WG_SIZE_COUNT][GGML_TYPE_COUNT][mul_mat_vec_max_cols];
    vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_q8_1_f32[DMMV_WG_SIZE_COUNT][GGML_TYPE_COUNT][mul_mat_vec_max_cols];

    vk_pipeline pipeline_mul_mat_vec_p021_f16_f32[p021_max_gqa_ratio];
    vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
    vk_pipeline pipeline_get_rows[GGML_TYPE_COUNT];
    vk_pipeline pipeline_get_rows_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_acc_f32;

    // [src0 0=fp32,1=fp16][src1 0=fp32,1=fp16][dst 0=fp32,1=fp16]
    vk_pipeline pipeline_add[2][2][2];
    vk_pipeline pipeline_add_norepeat[2][2][2];
    vk_pipeline pipeline_sub[2][2][2];
    vk_pipeline pipeline_sub_norepeat[2][2][2];
    vk_pipeline pipeline_mul[2][2][2];
    vk_pipeline pipeline_mul_norepeat[2][2][2];
    vk_pipeline pipeline_div[2][2][2];
    vk_pipeline pipeline_div_norepeat[2][2][2];
    vk_pipeline pipeline_add_rms[2][2][2];
    vk_pipeline pipeline_add_rms_norepeat[2][2][2];

    // indexed by num_additional_fused_ops == num_adds - 1
    vk_pipeline pipeline_multi_add[MAX_FUSED_ADDS];
    vk_pipeline pipeline_multi_add_rms[MAX_FUSED_ADDS];

    vk_pipeline pipeline_add_id_f32;

    vk_pipeline pipeline_concat_f32, pipeline_concat_f16, pipeline_concat_i32;
    vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32, pipeline_upscale_bicubic_f32;
    vk_pipeline pipeline_scale_f32;
    vk_pipeline pipeline_sqr_f32;
    vk_pipeline pipeline_sqrt_f32;
    vk_pipeline pipeline_sin_f32;
    vk_pipeline pipeline_cos_f32;
    vk_pipeline pipeline_log[2];
    vk_pipeline pipeline_tri[2];
    vk_pipeline pipeline_clamp_f32;
    vk_pipeline pipeline_pad_f32;
    vk_pipeline pipeline_roll_f32;
    vk_pipeline pipeline_repeat_f32, pipeline_repeat_back_f32;
    vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16, pipeline_cpy_f16_f32, pipeline_cpy_f32_bf16, pipeline_cpy_f32_i32, pipeline_cpy_i32_f32;
    vk_pipeline pipeline_contig_cpy_f32_f32, pipeline_contig_cpy_f32_f16, pipeline_contig_cpy_f16_f16, pipeline_contig_cpy_f16_f32, pipeline_contig_cpy_f32_bf16, pipeline_contig_cpy_f32_i32, pipeline_contig_cpy_i32_f32;
    vk_pipeline pipeline_cpy_f32_quant[GGML_TYPE_COUNT];
    vk_pipeline pipeline_cpy_quant_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_cpy_transpose_16, pipeline_cpy_transpose_32;
    vk_pipeline pipeline_set_rows_i32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_set_rows_i64[GGML_TYPE_COUNT];

    vk_pipeline pipeline_norm_f32;
    vk_pipeline pipeline_group_norm_f32;
    vk_pipeline pipeline_rms_norm_f32;
    vk_pipeline pipeline_rms_norm_mul_f32;
    vk_pipeline pipeline_rms_norm_partials_f32;
    vk_pipeline pipeline_rms_norm_mul_partials_f32;
    vk_pipeline pipeline_rms_norm_mul_rope_f32_f32;
    vk_pipeline pipeline_rms_norm_mul_rope_f32_f16;
    vk_pipeline pipeline_rms_norm_back_f32;
    vk_pipeline pipeline_l2_norm_f32;

    // [src/dst 0=fp32,1=fp16]
    vk_pipeline pipeline_exp[2];
    vk_pipeline pipeline_gelu[2];
    vk_pipeline pipeline_gelu_erf[2];
    vk_pipeline pipeline_gelu_quick[2];
    vk_pipeline pipeline_silu[2];
    vk_pipeline pipeline_relu[2];
    vk_pipeline pipeline_neg[2];
    vk_pipeline pipeline_tanh[2];
    vk_pipeline pipeline_sigmoid[2];
    vk_pipeline pipeline_hardsigmoid[2];
    vk_pipeline pipeline_hardswish[2];
    vk_pipeline pipeline_abs[2];
    vk_pipeline pipeline_softplus[2];
    vk_pipeline pipeline_step[2];
    vk_pipeline pipeline_round[2];
    vk_pipeline pipeline_ceil[2];
    vk_pipeline pipeline_floor[2];
    vk_pipeline pipeline_trunc[2];

    vk_pipeline pipeline_add1_f16_f16;
    vk_pipeline pipeline_add1_f16_f32;
    vk_pipeline pipeline_add1_f32_f32;

    vk_pipeline pipeline_arange_f32;

    vk_pipeline pipeline_fill_f32;

    vk_pipeline pipeline_geglu[2];
    vk_pipeline pipeline_reglu[2];
    vk_pipeline pipeline_swiglu[2];
    vk_pipeline pipeline_swiglu_oai[2];
    vk_pipeline pipeline_geglu_erf[2];
    vk_pipeline pipeline_geglu_quick[2];

    vk_pipeline pipeline_leaky_relu_f32;
    vk_pipeline pipeline_silu_back_f32;
    vk_pipeline pipeline_diag_mask_inf_f32;
    vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
    vk_pipeline pipeline_soft_max_f32_wg512, pipeline_soft_max_f32_f16_wg512;
    vk_pipeline pipeline_soft_max_back_f32;
    vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16, pipeline_rope_norm_f32_f16;
    vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16, pipeline_rope_neox_f32_f16;
    vk_pipeline pipeline_rope_multi_f32, pipeline_rope_multi_f16;
    vk_pipeline pipeline_rope_vision_f32, pipeline_rope_vision_f16;
    vk_pipeline pipeline_argsort_f32[num_argsort_pipelines];
    vk_pipeline pipeline_argsort_large_f32[num_argsort_pipelines];
    vk_pipeline pipeline_topk_f32[num_topk_pipelines];
    vk_pipeline pipeline_sum_rows_f32;
    vk_pipeline pipeline_cumsum_f32;
    vk_pipeline pipeline_argmax_f32;
    vk_pipeline pipeline_count_equal_i32;
    std::map<vk_solve_tri_pipeline_state, vk_pipeline> pipeline_solve_tri_f32;
    vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16;
    vk_pipeline pipeline_im2col_3d_f32, pipeline_im2col_3d_f32_f16;
    vk_pipeline pipeline_timestep_embedding_f32;
    vk_pipeline pipeline_conv_transpose_1d_f32;
    vk_pipeline pipeline_pool2d_f32;
    vk_pipeline pipeline_rwkv_wkv6_f32;
    vk_pipeline pipeline_rwkv_wkv7_f32;
    vk_pipeline pipeline_ssm_scan_f32_d128;
    vk_pipeline pipeline_ssm_scan_f32_d256;
    vk_pipeline pipeline_ssm_conv_f32;
    vk_pipeline pipeline_opt_step_adamw_f32;
    vk_pipeline pipeline_opt_step_sgd_f32;
    std::map<vk_conv2d_pipeline_state, vk_pipeline> pipeline_conv2d_f32[CONV_SHAPE_COUNT];
    std::map<vk_conv2d_pipeline_state, vk_pipeline> pipeline_conv2d_f16_f32[CONV_SHAPE_COUNT];
    std::map<vk_conv2d_pipeline_state, vk_pipeline> pipeline_conv_transpose_2d_f32[CONV_SHAPE_COUNT];
    std::map<vk_conv2d_pipeline_state, vk_pipeline> pipeline_conv_transpose_2d_f16_f32[CONV_SHAPE_COUNT];
    vk_pipeline pipeline_conv2d_dw_whcn_f32, pipeline_conv2d_dw_whcn_f16_f32;
    vk_pipeline pipeline_conv2d_dw_cwhn_f32, pipeline_conv2d_dw_cwhn_f16_f32;

    std::map<vk_fa_pipeline_state, vk_pipeline> pipeline_flash_attn_f32_f16[GGML_TYPE_COUNT];

    vk_pipeline pipeline_flash_attn_split_k_reduce;

    vk_pipeline pipeline_topk_moe[num_topk_moe_pipelines][TOPK_MOE_COUNT];

    std::vector<vk_pipeline_ref> all_pipelines;

    std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;

    vk::Fence fence;
    vk_buffer sync_staging;

    ggml_backend_buffer_type buffer_type;

    bool disable_fusion;
    bool disable_host_visible_vidmem;
    bool allow_sysmem_fallback;
    bool disable_graph_optimize;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    std::unique_ptr<vk_memory_logger> memory_logger;
#endif

    // for GGML_VK_PERF_LOGGER
    std::unique_ptr<vk_perf_logger> perf_logger;
    vk::QueryPool query_pool;
    int32_t num_queries;

    ~vk_device_struct() {
        VK_LOG_DEBUG("destroy device " << name);

        device.destroyFence(fence);

        ggml_vk_destroy_buffer(sync_staging);

        compute_queue.cmd_pool.destroy(device);
        transfer_queue.cmd_pool.destroy(device);

        for (auto& pipeline : all_pipelines) {
            if (pipeline.expired()) {
                continue;
            }
            vk_pipeline pl = pipeline.lock();
            ggml_vk_destroy_pipeline(device, pl);
        }
        all_pipelines.clear();

        device.destroyDescriptorSetLayout(dsl);

        device.destroy();
    }
};
void vk_command_pool::init(vk_device& device, vk_queue *q_) {
    cmd_buffer_idx = 0;
    q = q_;

    vk::CommandPoolCreateInfo command_pool_create_info(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), q->queue_family_index);
    pool = device->device.createCommandPool(command_pool_create_info);
}

void vk_command_pool::destroy(vk::Device& device) {
    device.destroyCommandPool(pool);
    pool = nullptr;
    cmd_buffers.clear();
}
struct vk_buffer_struct {
    vk::Buffer buffer = VK_NULL_HANDLE;
    vk::DeviceMemory device_memory = VK_NULL_HANDLE;
    vk::MemoryPropertyFlags memory_property_flags;
    void * ptr;
    size_t size = 0;
    vk::DeviceAddress bda_addr {};

    vk_device device;

    ~vk_buffer_struct() {
        if (size == 0) {
            return;
        }
        VK_LOG_DEBUG("~vk_buffer_struct(" << buffer << ", " << size << ")");

        device->device.freeMemory(device_memory);
        device->device.destroyBuffer(buffer);
    }
};

struct vk_subbuffer {
    vk_buffer buffer;
    uint64_t offset;
    uint64_t size;

    operator vk::DescriptorBufferInfo() const {
        return { buffer->buffer, offset, size };
    }
};
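// Illustrative usage (not from the original file): the conversion operator
// above lets a vk_subbuffer be handed directly to vulkan.hpp descriptor
// updates wherever a vk::DescriptorBufferInfo is expected, e.g.
// (names hypothetical):
//
//   vk_subbuffer sub { buf, 0, buf->size };
//   vk::DescriptorBufferInfo info = sub; // implicit conversion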
struct vk_semaphore {
    vk::Semaphore s;
    uint64_t value;
};

struct vk_submission {
    vk::CommandBuffer buffer;
    std::vector<vk_semaphore> wait_semaphores;
    std::vector<vk_semaphore> signal_semaphores;
};

typedef std::vector<vk_submission> vk_sequence;

struct vk_mat_mat_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t k_split;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
    uint32_t padded_N;
};
#define MAT_VEC_FUSION_FLAGS_BIAS0  0x1
#define MAT_VEC_FUSION_FLAGS_BIAS1  0x2
#define MAT_VEC_FUSION_FLAGS_SCALE0 0x4
#define MAT_VEC_FUSION_FLAGS_SCALE1 0x8

struct vk_mat_vec_push_constants {
    uint32_t ncols;
    uint32_t stride_a;
    uint32_t stride_b;
    uint32_t stride_d;
    uint32_t batch_stride_a;
    uint32_t batch_stride_b;
    uint32_t batch_stride_d;
    uint32_t fusion_flags;
    uint32_t ne02;
    uint32_t ne12;
    uint32_t broadcast2;
    uint32_t broadcast3;
};

struct vk_mat_vec_p021_push_constants {
    uint32_t ncols_x;
    uint32_t nrows_x;
    uint32_t nchannels_x;
    uint32_t nchannels_y;
    uint32_t b_offset;
    uint32_t d_offset;
    uint32_t fusion_flags;
};

struct vk_mat_vec_nc_push_constants {
    uint32_t ncols_x;
    uint32_t nrows_x;
    uint32_t row_stride_x;
    uint32_t channel_stride_x;
    uint32_t channel_stride_y;
    uint32_t channel_x_divisor;
    uint32_t ne12;
    uint32_t b_offset;
    uint32_t d_offset;
    uint32_t nb03;
    uint32_t nb13;
    uint32_t nb23;
    uint32_t fusion_flags;
};

struct vk_mat_mat_id_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11;
    uint32_t padded_N;
};

struct vk_mat_vec_id_push_constants {
    uint32_t ncols;
    uint32_t stride_a;
    uint32_t stride_b;
    uint32_t stride_d;
    uint32_t batch_stride_a;
    uint32_t batch_stride_b;
    uint32_t batch_stride_d;
    uint32_t fusion_flags;
    uint32_t nei0;
    uint32_t ne11;
};

struct vk_flash_attn_push_constants {
    uint32_t N;
    uint32_t KV;

    uint32_t ne1;
    uint32_t ne2;
    uint32_t ne3;

    uint32_t neq2;
    uint32_t neq3;
    uint32_t nek2;
    uint32_t nek3;
    uint32_t nev2;
    uint32_t nev3;
    uint32_t nem1;
    uint32_t nem2;
    uint32_t nem3;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb03;
    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;
    uint32_t nb21;
    uint32_t nb22;
    uint32_t nb23;

    float scale;
    float max_bias;
    float logit_softcap;

    uint32_t mask_n_head_log2;
    float m0;
    float m1;

    uint32_t gqa_ratio;
    uint32_t split_kv;
    uint32_t k_num;
};
static_assert(sizeof(vk_flash_attn_push_constants) <= 128, "sizeof(vk_flash_attn_push_constants) must be <= 128");

struct vk_op_push_constants {
    uint32_t KX;
    uint32_t KY;
    float param1;
    float param2;
};
struct vk_op_glu_push_constants {
    uint32_t N;
    uint32_t ne00;
    uint32_t ne20;
    uint32_t mode;  // 0: default, 1: swapped, 2: split
    float alpha;    // for swiglu_oai
    float limit;
};

struct vk_op_unary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t misalign_offsets;
    float param1; float param2;

    uint32_t ne0_012mp; uint32_t ne0_012L;
    uint32_t ne0_01mp;  uint32_t ne0_01L;
    uint32_t ne0_0mp;   uint32_t ne0_0L;
    uint32_t ne1_012mp; uint32_t ne1_012L;
    uint32_t ne1_01mp;  uint32_t ne1_01L;
    uint32_t ne1_0mp;   uint32_t ne1_0L;
};
static_assert(sizeof(vk_op_unary_push_constants) <= 128, "sizeof(vk_op_unary_push_constants) must be <= 128");

static vk_op_unary_push_constants vk_op_unary_push_constants_init(const ggml_tensor * src0, const ggml_tensor * dst, int64_t ne = 0) {
    GGML_ASSERT(ne != 0 || (ggml_nelements(src0) == ggml_nelements(dst)));
    ne = ne != 0 ? ne : ggml_nelements(dst);
    GGML_ASSERT(ne <= (int64_t)std::numeric_limits<uint32_t>::max());

    vk_op_unary_push_constants p{};
    p.ne = (uint32_t)ne;

    size_t src0_tsize = ggml_type_size(src0->type);
    p.ne00 = (uint32_t)src0->ne[0];
    p.ne01 = (uint32_t)src0->ne[1];
    p.ne02 = (uint32_t)src0->ne[2];
    p.ne03 = (uint32_t)src0->ne[3];
    p.nb00 = (uint32_t)(src0->nb[0] / src0_tsize);
    p.nb01 = (uint32_t)(src0->nb[1] / src0_tsize);
    p.nb02 = (uint32_t)(src0->nb[2] / src0_tsize);
    p.nb03 = (uint32_t)(src0->nb[3] / src0_tsize);

    size_t dst_tsize = ggml_type_size(dst->type);
    p.ne10 = (uint32_t)dst->ne[0];
    p.ne11 = (uint32_t)dst->ne[1];
    p.ne12 = (uint32_t)dst->ne[2];
    p.ne13 = (uint32_t)dst->ne[3];
    p.nb10 = (uint32_t)(dst->nb[0] / dst_tsize);
    p.nb11 = (uint32_t)(dst->nb[1] / dst_tsize);
    p.nb12 = (uint32_t)(dst->nb[2] / dst_tsize);
    p.nb13 = (uint32_t)(dst->nb[3] / dst_tsize);

    return p; // offsets are initialized later in ggml_vk_op
}
struct vk_op_pad_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t misalign_offsets;

    uint32_t lp0; uint32_t rp0;
    uint32_t lp1; uint32_t rp1;
    uint32_t lp2; uint32_t rp2;
    uint32_t lp3; uint32_t rp3;
};

static vk_op_pad_push_constants vk_op_pad_push_constants_init(const ggml_tensor * src0, const ggml_tensor * dst) {
    int64_t ne = ggml_nelements(dst);
    GGML_ASSERT(ne <= (int64_t)std::numeric_limits<uint32_t>::max());

    vk_op_pad_push_constants p{};
    p.ne = (uint32_t)ne;

    size_t src0_tsize = ggml_type_size(src0->type);
    p.ne00 = (uint32_t)src0->ne[0];
    p.ne01 = (uint32_t)src0->ne[1];
    p.ne02 = (uint32_t)src0->ne[2];
    p.ne03 = (uint32_t)src0->ne[3];
    p.nb00 = (uint32_t)(src0->nb[0] / src0_tsize);
    p.nb01 = (uint32_t)(src0->nb[1] / src0_tsize);
    p.nb02 = (uint32_t)(src0->nb[2] / src0_tsize);
    p.nb03 = (uint32_t)(src0->nb[3] / src0_tsize);

    size_t dst_tsize = ggml_type_size(dst->type);
    p.ne10 = (uint32_t)dst->ne[0];
    p.ne11 = (uint32_t)dst->ne[1];
    p.ne12 = (uint32_t)dst->ne[2];
    p.ne13 = (uint32_t)dst->ne[3];
    p.nb10 = (uint32_t)(dst->nb[0] / dst_tsize);
    p.nb11 = (uint32_t)(dst->nb[1] / dst_tsize);
    p.nb12 = (uint32_t)(dst->nb[2] / dst_tsize);
    p.nb13 = (uint32_t)(dst->nb[3] / dst_tsize);

    p.lp0 = dst->op_params[0];
    p.rp0 = dst->op_params[1];
    p.lp1 = dst->op_params[2];
    p.rp1 = dst->op_params[3];
    p.lp2 = dst->op_params[4];
    p.rp2 = dst->op_params[5];
    p.lp3 = dst->op_params[6];
    p.rp3 = dst->op_params[7];

    return p; // fastdiv values and offsets are initialized later in ggml_vk_op
}
// See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1.
// Precompute mp (m' in the paper) and L such that division
// can be computed using a multiply (high 32b of 64b result)
// and a shift:
//
// n/d = (mulhi(n, mp) + n) >> L;
static void init_fastdiv_values(uint32_t d, uint32_t &mp, uint32_t &L)
{
    // compute L = ceil(log2(d));
    L = 0;
    while (L < 32 && (uint32_t{1} << L) < d) {
        L++;
    }

    mp = (uint32_t)((uint64_t{1} << 32) * ((uint64_t{1} << L) - d) / d + 1);
}
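// Worked example (illustrative, not part of the original file): for d = 6,
// the loop gives L = 3 (2^3 = 8 >= 6) and
// mp = floor(2^32 * (8 - 6) / 6) + 1 = 1431655766. Then for n = 35:
// mulhi(35, mp) = 11 and (11 + 35) >> 3 = 5 == 35 / 6. A reference
// consumer of the (mp, L) pair would look like:
[[maybe_unused]] static uint32_t fastdiv_ref(uint32_t n, uint32_t mp, uint32_t L) {
    const uint32_t hi = (uint32_t)(((uint64_t)n * mp) >> 32); // mulhi(n, mp)
    return (hi + n) >> L;
}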
template <typename T> void init_pushconst_fastdiv(T &p) {
    GGML_UNUSED(p);
    static_assert(!std::is_const<T>::value, "unexpected type");
}

template <> void init_pushconst_fastdiv(vk_op_unary_push_constants &p) {
    // Compute magic values to divide by these six numbers.
    init_fastdiv_values(p.ne02*p.ne01*p.ne00, p.ne0_012mp, p.ne0_012L);
    init_fastdiv_values(p.ne01*p.ne00,        p.ne0_01mp,  p.ne0_01L);
    init_fastdiv_values(p.ne00,               p.ne0_0mp,   p.ne0_0L);
    init_fastdiv_values(p.ne12*p.ne11*p.ne10, p.ne1_012mp, p.ne1_012L);
    init_fastdiv_values(p.ne11*p.ne10,        p.ne1_01mp,  p.ne1_01L);
    init_fastdiv_values(p.ne10,               p.ne1_0mp,   p.ne1_0L);
}
struct vk_op_binary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23;
    uint32_t misalign_offsets;
    float param1; float param2; int32_t param3;
};

struct vk_op_multi_add_push_constants {
    // shape for dst
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23;

    // strides for srcs+dst
    uint32_t nb[MAX_PARAMETER_COUNT][4];

    uint32_t rms_partials;
};
// update multi_add.comp if this changes
static_assert(MAX_PARAMETER_COUNT == 12);
static_assert(sizeof(vk_op_multi_add_push_constants) <= 256);

struct vk_op_topk_moe_push_constants {
    uint32_t n_rows;
    uint32_t n_expert_used;
    float clamp_min;
    float clamp_max;
};

struct vk_op_add_id_push_constants {
    uint32_t ne0;
    uint32_t ne1;
    uint32_t s01;
    uint32_t s02;
    uint32_t s11;
    uint32_t s21;
};

struct vk_op_diag_mask_push_constants {
    uint32_t ncols;
    uint32_t rows_per_channel;
    int32_t n_past;
};
struct vk_op_rope_push_constants {
    uint32_t rope_mode;
    uint32_t ncols;
    uint32_t n_dims;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[2];
    float theta_scale;
    uint32_t has_ff;
    uint32_t ne02;
    uint32_t s1;
    uint32_t s2;
    int32_t sections[4];
    uint32_t is_imrope;
    uint32_t is_back;
    uint32_t set_rows_stride;
};

// For fused rms_norm+mul+rope(+view+set_rows)
struct vk_op_rms_norm_mul_rope_push_constants {
    vk_op_binary_push_constants bin;
    vk_op_rope_push_constants rope;
};

struct vk_op_soft_max_push_constants {
    uint32_t KX;
    uint32_t KY;
    uint32_t ne00;
    uint32_t ne01;
    uint32_t ne02;
    uint32_t ne12;
    uint32_t ne13;
    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;
    float scale;
    float max_bias;
    float m0;
    float m1;
    uint32_t n_head_log2;
    uint32_t nrows_x;
    uint32_t has_sinks;
};
struct vk_op_argsort_push_constants {
    uint32_t ncols;
    uint32_t ncols_padded;
    uint32_t ncols_padded_log2;
    uint32_t nrows;
    uint32_t order;
    uint32_t outer_start;
    uint32_t outer_end;
    uint32_t inner_start;
    uint32_t inner_end;
};

struct vk_op_topk_push_constants {
    uint32_t orig_ncols;
    uint32_t ncols_input;
    uint32_t ncols_output;
    uint32_t nrows;
    uint32_t first_pass;
    uint32_t last_pass;
};

struct vk_op_im2col_push_constants {
    uint64_t dst_addr;
    uint32_t batch_offset; uint32_t offset_delta;
    uint32_t IC;
    uint32_t IW; uint32_t IH;
    uint32_t OW; uint32_t OH;
    uint32_t KW; uint32_t KH;
    uint32_t pelements;
    uint32_t CHW;
    int32_t s0; int32_t s1;
    int32_t p0; int32_t p1;
    int32_t d0; int32_t d1;
};

struct vk_op_im2col_3d_push_constants {
    uint64_t dst_addr;
    uint32_t nb10;
    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;
    uint32_t s0;
    uint32_t s1;
    uint32_t s2;
    uint32_t p0;
    uint32_t p1;
    uint32_t p2;
    uint32_t d0;
    uint32_t d1;
    uint32_t d2;
    uint32_t IW;
    uint32_t IH;
    uint32_t ID;
    uint32_t IC;
    uint32_t KW;
    uint32_t OH;
    uint32_t KD_KH_KW;
    uint32_t KH_KW;
    uint32_t IC_KD_KH_KW;
    uint32_t N_OD_OH;
    uint32_t OD_OH;
    uint32_t OD_OH_OW_IC_KD_KH_KW;
    uint32_t OH_OW_IC_KD_KH_KW;
    uint32_t OW_IC_KD_KH_KW;
    uint32_t misalign_offsets;
};
struct vk_op_timestep_embedding_push_constants {
    uint32_t nb1;
    uint32_t dim;
    uint32_t max_period;
};

struct vk_op_conv_transpose_1d_push_constants {
    uint32_t Cout;
    uint32_t Cin;
    uint32_t K;
    uint32_t L;
    uint32_t KL;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb11;
    uint32_t nb1;

    int32_t s0;
};

struct vk_op_pool2d_push_constants {
    uint32_t IW; uint32_t IH;
    uint32_t OW; uint32_t OH;
    uint32_t OC;
    uint32_t pelements;
    uint32_t op;
    int32_t k0; int32_t k1;
    int32_t s0; int32_t s1;
    int32_t p0; int32_t p1;
};

struct vk_op_rwkv_wkv6_push_constants {
    uint32_t B;
    uint32_t T;
    uint32_t C;
    uint32_t H;
};

struct vk_op_rwkv_wkv7_push_constants {
    uint32_t B;
    uint32_t T;
    uint32_t C;
    uint32_t H;
};

struct vk_op_ssm_scan_push_constants {
    uint32_t nb02, nb03, nb12, nb13;
    uint32_t nb21, nb22, nb31;
    uint32_t nb42, nb43, nb52, nb53;
    uint32_t s_off;
    uint32_t n_head, d_head, n_group, n_tok;
};

struct vk_op_ssm_conv_push_constants {
    uint32_t nb01, nb02;
    uint32_t nb11;
    uint32_t dst_nb0, dst_nb1, dst_nb2;
    uint32_t nc, ncs, nr, n_t, n_s;
};
struct vk_op_conv2d_push_constants {
    uint32_t Cout;
    uint32_t Cin;
    uint32_t N;

    uint32_t KW;
    uint32_t KH;
    uint32_t W;
    uint32_t H;
    uint32_t OW;
    uint32_t OH;

    uint32_t s0;
    uint32_t s1;
    uint32_t p0;
    uint32_t p1;
    uint32_t d0;
    uint32_t d1;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb03;

    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;

    uint32_t nb1;
    uint32_t nb2;
    uint32_t nb3;

    // init_fastdiv_values constants for dividing by OW, OW*OH
    uint32_t OWmp;   uint32_t OWL;
    uint32_t OWOHmp; uint32_t OWOHL;
};

template <> void init_pushconst_fastdiv(vk_op_conv2d_push_constants &p) {
    // Compute magic values to divide by OW, OW*OH
    init_fastdiv_values(p.OW,      p.OWmp,   p.OWL);
    init_fastdiv_values(p.OW*p.OH, p.OWOHmp, p.OWOHL);
}

struct vk_op_conv_transpose_2d_push_constants {
    uint32_t Cout;
    uint32_t Cin;
    uint32_t N;

    uint32_t KW;
    uint32_t KH;
    uint32_t W;
    uint32_t H;
    uint32_t OW;
    uint32_t OH;

    uint32_t s0;
    uint32_t s1;
    uint32_t p0;
    uint32_t p1;
    uint32_t d0;
    uint32_t d1;

    uint32_t nb01;
    uint32_t nb02;
    uint32_t nb03;

    uint32_t nb11;
    uint32_t nb12;
    uint32_t nb13;

    uint32_t nb1;
    uint32_t nb2;
    uint32_t nb3;

    // init_fastdiv_values constants for dividing by OW, OW*OH
    uint32_t OWmp;   uint32_t OWL;
    uint32_t OWOHmp; uint32_t OWOHL;
};

template <> void init_pushconst_fastdiv(vk_op_conv_transpose_2d_push_constants &p) {
    // Compute magic values to divide by OW, OW*OH
    init_fastdiv_values(p.OW,      p.OWmp,   p.OWL);
    init_fastdiv_values(p.OW*p.OH, p.OWOHmp, p.OWOHL);
}
struct vk_op_conv2d_dw_push_constants {
    uint32_t ne;
    uint32_t batches;
    uint32_t channels;
    uint32_t dst_w;
    uint32_t dst_h;
    uint32_t src_w;
    uint32_t src_h;
    uint32_t knl_w;
    uint32_t knl_h;
    int32_t stride_x;
    int32_t stride_y;
    int32_t pad_x;
    int32_t pad_y;
    int32_t dilation_x;
    int32_t dilation_y;
};

struct vk_op_upscale_push_constants {
    uint32_t ne; uint32_t a_offset; uint32_t d_offset;
    uint32_t ne00; uint32_t ne01;
    uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13;
    float sf0; float sf1; float sf2; float sf3;
    float pixel_offset;
};

struct vk_op_sum_rows_push_constants {
    uint32_t n_cols;
    uint32_t ne01, ne02;
    uint32_t nb01, nb02, nb03;
    uint32_t nb11, nb12, nb13;
    float weight;
    uint32_t misalign_offsets;
    uint32_t ne0_12mp, ne0_12L;
    uint32_t ne0_1mp,  ne0_1L;
};

static vk_op_sum_rows_push_constants vk_op_sum_rows_push_constants_init(const ggml_tensor * src, const ggml_tensor * dst, int64_t n_cols) {
    uint32_t type_size = (uint32_t)ggml_type_size(src->type);
    vk_op_sum_rows_push_constants p = {};
    p.n_cols = (uint32_t)n_cols;
    p.ne01   = (uint32_t)src->ne[1];
    p.ne02   = (uint32_t)src->ne[2];
    p.nb01   = (uint32_t)src->nb[1] / type_size;
    p.nb02   = (uint32_t)src->nb[2] / type_size;
    p.nb03   = (uint32_t)src->nb[3] / type_size;
    p.nb11   = (uint32_t)dst->nb[1] / type_size;
    p.nb12   = (uint32_t)dst->nb[2] / type_size;
    p.nb13   = (uint32_t)dst->nb[3] / type_size;
    p.weight = 1.0f;
    return p;
}

template <> void init_pushconst_fastdiv(vk_op_sum_rows_push_constants &p) {
    init_fastdiv_values(p.ne01*p.ne02, p.ne0_12mp, p.ne0_12L);
    init_fastdiv_values(p.ne01,        p.ne0_1mp,  p.ne0_1L);
}
  1264. // Allow pre-recording command buffers
  1265. struct vk_staging_memcpy {
  1266. vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}
  1267. void * dst;
  1268. const void * src;
  1269. size_t n;
  1270. };
  1271. struct vk_staging_memset {
  1272. vk_staging_memset(void * _dst, uint32_t _val, size_t _n) : dst(_dst), val(_val), n(_n) {}
  1273. void * dst;
  1274. uint32_t val;
  1275. size_t n;
  1276. };
  1277. struct vk_context_struct {
  1278. vk_submission * s;
  1279. std::vector<vk_sequence> seqs;
  1280. int exit_tensor_idx;
  1281. std::vector<vk_staging_memcpy> in_memcpys;
  1282. std::vector<vk_staging_memcpy> out_memcpys;
  1283. std::vector<vk_staging_memset> memsets;
  1284. vk_command_pool * p {};
  1285. };
  1286. typedef std::shared_ptr<vk_context_struct> vk_context;
  1287. typedef std::weak_ptr<vk_context_struct> vk_context_ref;
  1288. struct ggml_vk_garbage_collector {
  1289. std::vector<vk_semaphore> tl_semaphores;
  1290. std::vector<vk_semaphore> semaphores;
  1291. std::vector<vk::Event> events;
  1292. std::vector<vk_context> contexts;
  1293. };
  1294. static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx, vk_context subctx);
  1295. static void ggml_vk_load_shaders(vk_device& device);
  1296. static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx);
  1297. #if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
  1298. #define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl
  1299. static std::string format_size(size_t size) {
  1300. const size_t kib = 1024;
  1301. const size_t mib = kib * 1024;
  1302. const size_t gib = mib * 1024;
  1303. std::ostringstream oss;
  1304. oss << std::fixed << std::setprecision(2);
  1305. if (size >= gib) {
  1306. oss << static_cast<double>(size) / gib << " GiB";
  1307. } else if (size >= mib) {
  1308. oss << static_cast<double>(size) / mib << " MiB";
  1309. } else if (size >= kib) {
  1310. oss << static_cast<double>(size) / kib << " KiB";
  1311. } else {
  1312. oss << size << " B";
  1313. }
  1314. return oss.str();
  1315. }
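
// For example, format_size(3 * 1024 * 1024) yields "3.00 MiB" and
// format_size(512) yields "512 B"; sizes are printed with two decimals
// in the largest unit they reach.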

class vk_memory_logger {
public:
    vk_memory_logger(): total_device(0), total_host(0) {}
    void log_allocation(vk_buffer_ref buf_ref, size_t size);
    void log_deallocation(vk_buffer_ref buf_ref);

private:
    std::map<vk::Buffer, size_t> allocations; // Track allocations
    size_t total_device;
    size_t total_host;
};
#else
#define VK_LOG_MEMORY(msg) ((void) 0)
#endif // GGML_VULKAN_MEMORY_DEBUG

class vk_perf_logger {
public:
    void print_timings() {
        if (timings.empty()) {
            return;
        }
        uint64_t total_all_op_times = 0;
        std::cerr << "----------------\nVulkan Timings:" << std::endl;
        for (const auto & t : timings) {
            uint64_t total_op_times = 0;
            for (const auto & time : t.second) {
                total_op_times += time;
            }
            std::cerr << t.first << ": " << t.second.size() << " x " << (total_op_times / t.second.size() / 1000.0)
                      << " us";

            // If we have as many flops entries as timing entries for the op, compute and log the FLOPS/s.
            auto it = flops.find(t.first);
            if (it != flops.end() && it->second.size() == t.second.size()) {
                uint64_t total_op_flops = 0;
                for (const auto & elem : it->second) {
                    total_op_flops += elem;
                }
                std::cerr << " ("
                          << (double(total_op_flops) / (1000.0 * 1000.0 * 1000.0)) /
                                 (double(total_op_times) / (1000.0 * 1000.0 * 1000.0))
                          << " GFLOPS/s)";
            }

            total_all_op_times += total_op_times;

            std::cerr << std::endl;
        }

        if (timings.size() > 0) {
            std::cerr << "Total time: " << total_all_op_times / 1000.0 << " us." << std::endl;
        }

        timings.clear();
        flops.clear();
    }

    void log_timing(const ggml_tensor * node, uint64_t time) {
        if (node->op == GGML_OP_UNARY) {
            timings[ggml_unary_op_name(ggml_get_unary_op(node))].push_back(time);
            return;
        }
        if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
            const uint64_t m     = node->src[0]->ne[1];
            const uint64_t n     = node->ne[1];
            const uint64_t k     = node->src[1]->ne[0];
            const uint64_t batch = node->src[1]->ne[2] * node->src[1]->ne[3];
            std::string name = ggml_op_name(node->op);
            if ((node->op == GGML_OP_MUL_MAT && n <= mul_mat_vec_max_cols) ||
                (node->op == GGML_OP_MUL_MAT_ID && node->src[2]->ne[1] == 1)) {
                name += "_VEC";
            }
            name += " ";
            name += ggml_type_name(node->src[0]->type);
            name += " m=" + std::to_string(m) + " n=" + std::to_string(n) + " k=" + std::to_string(k);
            if (batch > 1) {
                name += " batch=" + std::to_string(batch);
            }
            timings[name].push_back(time);
            // k multiplies plus k-1 adds per output element
            flops[name].push_back(m * n * (k + (k - 1)) * batch);
            return;
        }
        if (node->op == GGML_OP_CONV_2D || node->op == GGML_OP_CONV_TRANSPOSE_2D) {
            std::string name = ggml_op_name(node->op);
            ggml_tensor * knl = node->src[0];
            uint64_t OW   = node->ne[0];
            uint64_t OH   = node->ne[1];
            uint64_t N    = node->ne[3];
            uint64_t Cout = node->ne[2];
            uint64_t KW   = knl->ne[0];
            uint64_t KH   = knl->ne[1];
            uint64_t Cin  = node->src[1]->ne[2];
            // KxCRS @ CRSxNPQ = KxNPQ -> M=K, K=CRS, N=NPQ
            uint64_t size_M = Cout;
            uint64_t size_K = Cin * KW * KH;
            uint64_t size_N = N * OW * OH;
            uint64_t n_flops = size_M * size_N * (size_K + (size_K - 1));
            name += " M=Cout=" + std::to_string(size_M) + ", K=Cin*KW*KH=" + std::to_string(size_K) +
                    ", N=N*OW*OH=" + std::to_string(size_N);
            flops[name].push_back(n_flops);
            timings[name].push_back(time);
            return;
        }
        if (node->op == GGML_OP_RMS_NORM) {
            std::string name = ggml_op_name(node->op);
            name += "(" + std::to_string(node->ne[0]) + "," + std::to_string(node->ne[1]) + "," +
                    std::to_string(node->ne[2]) + "," + std::to_string(node->ne[3]) + ")";
            timings[name].push_back(time);
            return;
        }
        if (node->op == GGML_OP_FLASH_ATTN_EXT) {
            const ggml_tensor * dst = node;
            const ggml_tensor * q   = node->src[0];
            const ggml_tensor * k   = node->src[1];
            const ggml_tensor * v   = node->src[2];
            const ggml_tensor * m   = node->src[3];
            std::stringstream name;
            name << ggml_op_name(node->op) <<
                " dst(" << dst->ne[0] << "," << dst->ne[1] << "," << dst->ne[2] << "," << dst->ne[3] << "), " <<
                " q(" << q->ne[0] << "," << q->ne[1] << "," << q->ne[2] << "," << q->ne[3] << "), " <<
                " k(" << k->ne[0] << "," << k->ne[1] << "," << k->ne[2] << "," << k->ne[3] << "), " <<
                " v(" << v->ne[0] << "," << v->ne[1] << "," << v->ne[2] << "," << v->ne[3] << "), " <<
                " m(" << (m ? m->ne[0] : 0) << "," << (m ? m->ne[1] : 0) << "," << (m ? m->ne[2] : 0) << "," << (m ? m->ne[3] : 0) << ")";
            timings[name.str()].push_back(time);
            return;
        }
        timings[ggml_op_name(node->op)].push_back(time);
    }

private:
    std::map<std::string, std::vector<uint64_t>> timings;
    std::map<std::string, std::vector<uint64_t>> flops;
};

struct ggml_backend_vk_context {
    std::string name;

    vk_device device;

    size_t semaphore_idx, event_idx;
    ggml_vk_garbage_collector gc;
    size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k, prealloc_size_add_rms_partials, prealloc_size_add_rms_partials_offset;
    vk_buffer prealloc_x, prealloc_y, prealloc_split_k, prealloc_add_rms_partials, sync_staging;
    vk::Fence fence, almost_ready_fence;
    bool submit_pending {};
    bool almost_ready_fence_pending {};
    // Set before op_add and unset after op_rms_norm to indicate that the add should
    // write partial sums to accumulate the square of the vector components
    bool do_add_rms_partials_offset_calculation;
    bool do_add_rms_partials;

    uint64_t last_total_mul_mat_bytes {};

    // Cache most recent tensor that was converted into prealloc_y, and what pipeline it used to convert.
    vk_pipeline_struct * prealloc_y_last_pipeline_used {};
    const ggml_tensor * prealloc_y_last_tensor_used {};

    // Track which nodes have been used since the last sync, and whether they were written to.
    std::vector<const ggml_tensor *> unsynced_nodes_written;
    std::vector<const ggml_tensor *> unsynced_nodes_read;

    // Track which prealloc buffers have pending reads that need to be synchronized.
    // These are checked before writing to the buffer (and call ggml_vk_sync_buffers if set),
    // and set to true after the buffer contents are consumed.
    bool prealloc_x_need_sync, prealloc_y_need_sync, prealloc_split_k_need_sync;

    vk_context_ref compute_ctx;
    vk_context_ref transfer_ctx;

    std::vector<vk_context_ref> tensor_ctxs;

    std::vector<vk::DescriptorPool> descriptor_pools;
    std::vector<vk::DescriptorSet> descriptor_sets;
    uint32_t descriptor_set_idx {};
    uint32_t pipeline_descriptor_set_requirements {};

    vk_command_pool compute_cmd_pool;
    vk_command_pool transfer_cmd_pool;

    // number of additional consecutive nodes that are being fused with the
    // node currently being processed
    int num_additional_fused_ops {};
    // Bitmask of which fused ops need to write an intermediate value to memory.
    // Bit 'i' means nodes[start_of_fusion + i] writes to memory.
    // If there's no fusion, bit 0 is still set.
    int fused_ops_write_mask {};
};

static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT

static uint64_t vk_tensor_offset(const ggml_tensor * tensor) {
    if (tensor->view_src) {
        return (uint8_t *) tensor->view_src->data - (uint8_t *) vk_ptr_base;
    }
    return (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
}
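
// Tensor "data" pointers in this backend are not real host addresses: the
// buffer allocator hands out addresses starting at the fake base vk_ptr_base,
// so subtracting it recovers the byte offset into the backing vk_buffer. For
// example, a tensor whose data pointer is vk_ptr_base + 4096 lives at offset
// 4096 within its buffer.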

static uint32_t get_misalign_bytes(const ggml_backend_vk_context * ctx, const ggml_tensor * t)
{
    return ((vk_tensor_offset(t) + t->view_offs) & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1));
}
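
// Worked example: minStorageBufferOffsetAlignment is a power of two, so the
// mask (alignment - 1) extracts the remainder. With an alignment of 64 and a
// tensor at byte offset 100, the misalignment is 100 & 63 = 36 bytes, which
// the specializations below pass to the shader as an element offset while the
// descriptor itself is bound at the aligned offset.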

template <typename T> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, T &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    GGML_UNUSED(p);
    GGML_UNUSED(src0);
    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
    GGML_UNUSED(dst);
    static_assert(!std::is_const<T>::value, "unexpected type");
    GGML_ASSERT(!src0 || get_misalign_bytes(ctx, src0) == 0);
    GGML_ASSERT(!src1 || get_misalign_bytes(ctx, src1) == 0);
    GGML_ASSERT(!src2 || get_misalign_bytes(ctx, src2) == 0);
    GGML_ASSERT(!src3 || get_misalign_bytes(ctx, src3) == 0);
    GGML_ASSERT(!dst  || get_misalign_bytes(ctx, dst)  == 0);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_mat_vec_p021_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.b_offset = b_offset;
    p.d_offset = d_offset;

    GGML_UNUSED(src0);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_mat_vec_nc_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst)  / ggml_type_size(dst->type);

    p.b_offset = b_offset;
    p.d_offset = d_offset;

    GGML_UNUSED(src0);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

struct ggml_backend_vk_buffer_context {
    vk_device_ref device;
    vk_buffer dev_buffer;
    std::string name;

    ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
        device(device),
        dev_buffer(dev_buffer),
        name(name) {
    }

    ~ggml_backend_vk_buffer_context() {
        ggml_vk_destroy_buffer(dev_buffer);
    }
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
static std::mutex log_mutex;

void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    const std::string type = device ? "device" : "host";
    allocations[buf->buffer] = size;
    total_device += device ? size : 0;
    total_host   += device ? 0 : size;
    VK_LOG_MEMORY(buf->device->name << ": +" << format_size(size) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
}

void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
    if (buf_ref.expired() || buf_ref.lock()->size == 0) {
        return;
    }

    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    std::string type = device ? "device" : "host";
    auto it = allocations.find(buf->buffer);
    if (it != allocations.end()) {
        // Only adjust the totals after confirming the buffer is tracked;
        // dereferencing an end() iterator here would be undefined behavior.
        total_device -= device ? it->second : 0;
        total_host   -= device ? 0 : it->second;
        VK_LOG_MEMORY(buf->device->name << ": -" << format_size(it->second) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
        allocations.erase(it);
    } else {
        VK_LOG_MEMORY("ERROR " << buf->device->name << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
    }
}
#endif // GGML_VULKAN_MEMORY_DEBUG

struct vk_instance_t {
    vk::Instance instance;

    bool debug_utils_support = false; // VK_EXT_debug_utils enabled
    PFN_vkSetDebugUtilsObjectNameEXT pfn_vkSetDebugUtilsObjectNameEXT = {};
    PFN_vkQueueBeginDebugUtilsLabelEXT pfn_vkQueueBeginDebugUtilsLabelEXT = {};
    PFN_vkQueueEndDebugUtilsLabelEXT pfn_vkQueueEndDebugUtilsLabelEXT = {};
    PFN_vkCmdBeginDebugUtilsLabelEXT pfn_vkCmdBeginDebugUtilsLabelEXT = {};
    PFN_vkCmdEndDebugUtilsLabelEXT pfn_vkCmdEndDebugUtilsLabelEXT = {};
    PFN_vkCmdInsertDebugUtilsLabelEXT pfn_vkCmdInsertDebugUtilsLabelEXT = {};

    std::vector<size_t> device_indices;
    std::vector<bool> device_supports_membudget;
    vk_device devices[GGML_VK_MAX_DEVICES];
};

static bool vk_instance_initialized = false;
static vk_instance_t vk_instance;

static bool vk_perf_logger_enabled = false;

#ifdef GGML_VULKAN_CHECK_RESULTS
static size_t vk_skip_checks;
static size_t vk_output_tensor;

static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name);
static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx);
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx);
#endif

typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

static void ggml_backend_vk_free(ggml_backend_t backend);

static VkDeviceSize ggml_vk_get_max_buffer_range(const ggml_backend_vk_context * ctx, const vk_buffer &buf, const VkDeviceSize offset) {
    const VkDeviceSize range = std::min(VkDeviceSize{buf->size - offset},
                                        VkDeviceSize{ctx->device->properties.limits.maxStorageBufferRange});
    return range;
}
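
// The clamp matters because maxStorageBufferRange can be far smaller than the
// buffer itself; the Vulkan spec only requires devices to support 2^27 bytes
// (128 MiB). On such a device, binding a 1 GiB buffer at offset 0 yields a
// usable descriptor range of 128 MiB, not the full buffer.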

// Wait for ctx->fence to be signaled.
static void ggml_vk_wait_for_fence(ggml_backend_vk_context * ctx) {
    // Use waitForFences while most of the graph executes. Hopefully the CPU can sleep
    // during this wait.
    if (ctx->almost_ready_fence_pending) {
        VK_CHECK(ctx->device->device.waitForFences({ ctx->almost_ready_fence }, true, UINT64_MAX), "almost_ready_fence");
        ctx->device->device.resetFences({ ctx->almost_ready_fence });
        ctx->almost_ready_fence_pending = false;
    }

    // Spin (w/pause) waiting for the graph to finish executing.
    vk::Result result;
    while ((result = ctx->device->device.getFenceStatus(ctx->fence)) != vk::Result::eSuccess) {
        if (result != vk::Result::eNotReady) {
            fprintf(stderr, "ggml_vulkan: error %s at %s:%d\n", to_string(result).c_str(), __FILE__, __LINE__);
            exit(1);
        }
        for (uint32_t i = 0; i < 100; ++i) {
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
            YIELD();
        }
    }
    ctx->device->device.resetFences({ ctx->fence });
}

// variables to track number of compiles in progress
static uint32_t compile_count = 0;
static std::mutex compile_count_mutex;
static std::condition_variable compile_count_cond;

static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, size_t spv_size, const void* spv_data, const std::string entrypoint,
                                         uint32_t parameter_count, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t> specialization_constants,
                                         bool disable_robustness, bool require_full_subgroups, uint32_t required_subgroup_size) {
    VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << pipeline->name << ", " << entrypoint << ", " << parameter_count <<
                 ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " <<
                 disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")");
    GGML_ASSERT(parameter_count > 0);
    GGML_ASSERT(parameter_count <= MAX_PARAMETER_COUNT);
    GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT

    vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
    pipeline->shader_module = device->device.createShaderModule(shader_module_create_info);

    vk::PushConstantRange pcr(
        vk::ShaderStageFlagBits::eCompute,
        0,
        pipeline->push_constant_size
    );

    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), device->dsl, pcr);
    pipeline->layout = device->device.createPipelineLayout(pipeline_layout_create_info);

    std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());

    for (size_t i = 0; i < specialization_constants.size(); i++) {
        specialization_entries[i].constantID = i;
        specialization_entries[i].offset     = i * sizeof(uint32_t);
        specialization_entries[i].size       = sizeof(uint32_t);
    }

    vk::SpecializationInfo specialization_info(
        specialization_entries.size(),
        specialization_entries.data(),
        specialization_constants.size() * sizeof(uint32_t),
        specialization_constants.data()
    );

    vk::PipelineShaderStageCreateFlags pipeline_shader_stage_create_flags{};

    if (device->subgroup_require_full_support && require_full_subgroups) {
        pipeline_shader_stage_create_flags |= vk::PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT;
    }

    vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
        pipeline_shader_stage_create_flags,
        vk::ShaderStageFlagBits::eCompute,
        pipeline->shader_module,
        entrypoint.c_str(),
        &specialization_info);

    vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT pipeline_shader_stage_required_subgroup_size_create_info;
    pipeline_shader_stage_required_subgroup_size_create_info.requiredSubgroupSize = required_subgroup_size;
    if (device->subgroup_size_control && required_subgroup_size > 0) {
        GGML_ASSERT(device->subgroup_min_size <= required_subgroup_size && required_subgroup_size <= device->subgroup_max_size);
        pipeline_shader_create_info.setPNext(&pipeline_shader_stage_required_subgroup_size_create_info);
    }

    vk::ComputePipelineCreateInfo compute_pipeline_create_info(
        device->pipeline_executable_properties_support ?
            vk::PipelineCreateFlagBits::eCaptureStatisticsKHR :
            vk::PipelineCreateFlags{},
        pipeline_shader_create_info,
        pipeline->layout);

    vk::PipelineRobustnessCreateInfoEXT rci;

    if (device->pipeline_robustness && disable_robustness) {
        rci.storageBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled;
        rci.uniformBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled;
        compute_pipeline_create_info.setPNext(&rci);
    }

    try {
        pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Compute pipeline creation failed for " << pipeline->name << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
    pipeline->compiled = true;

    if (vk_instance.debug_utils_support) {
        vk::DebugUtilsObjectNameInfoEXT duoni;
        duoni.objectType = vk::ObjectType::ePipeline;
        duoni.pObjectName = pipeline->name.c_str();
        duoni.objectHandle = /*reinterpret_cast*/(uint64_t)(static_cast<VkPipeline>(pipeline->pipeline));
        vk_instance.pfn_vkSetDebugUtilsObjectNameEXT(device->device, &static_cast<VkDebugUtilsObjectNameInfoEXT &>(duoni));
    }

    if (device->pipeline_executable_properties_support) {
        vk::PipelineExecutableInfoKHR executableInfo;
        executableInfo.pipeline = pipeline->pipeline;

        auto statistics = device->device.getPipelineExecutableStatisticsKHR(executableInfo);
        for (auto & s : statistics) {
            // "Register Count" is reported by NVIDIA drivers.
            if (strcmp(s.name, "Register Count") == 0) {
                VK_LOG_DEBUG(pipeline->name << " " << s.name << ": " << s.value.u64 << " registers");
                pipeline->register_count = (uint32_t)s.value.u64;
            }
        }
    }

    device->all_pipelines.push_back(pipeline);

    {
        std::lock_guard<std::mutex> guard(compile_count_mutex);
        assert(compile_count > 0);
        compile_count--;
    }
    compile_count_cond.notify_all();
}

static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_vk_destroy_pipeline(" << pipeline->name << ")");
    device.destroyPipelineLayout(pipeline->layout);
    device.destroyShaderModule(pipeline->shader_module);
    device.destroyPipeline(pipeline->pipeline);
}

static void ggml_pipeline_request_descriptor_sets(ggml_backend_vk_context *ctx, vk_pipeline& pipeline, uint32_t n) {
    VK_LOG_DEBUG("ggml_pipeline_request_descriptor_sets(" << pipeline->name << ", " << n << ")");
    ctx->pipeline_descriptor_set_requirements += n;
    if (!pipeline->compiled) {
        pipeline->needed = true;
        ggml_vk_load_shaders(ctx->device);
    }
    ggml_pipeline_allocate_descriptor_sets(ctx);
}

static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx) {
    if (ctx->descriptor_sets.size() >= ctx->pipeline_descriptor_set_requirements) {
        // Enough descriptors are available
        return;
    }

    vk_device& device = ctx->device;

    // Grow by 50% to avoid frequent allocations
    uint32_t needed = std::max(3 * ctx->descriptor_sets.size() / 2, size_t{ctx->pipeline_descriptor_set_requirements});
    uint32_t to_alloc = needed - ctx->descriptor_sets.size();
    uint32_t pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE - ctx->descriptor_sets.size() % VK_DEVICE_DESCRIPTOR_POOL_SIZE;
    uint32_t pool_idx = ctx->descriptor_sets.size() / VK_DEVICE_DESCRIPTOR_POOL_SIZE;

    while (to_alloc > 0) {
        const uint32_t alloc_count = std::min(pool_remaining, to_alloc);
        to_alloc -= alloc_count;
        pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE;

        if (pool_idx >= ctx->descriptor_pools.size()) {
            vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, MAX_PARAMETER_COUNT * VK_DEVICE_DESCRIPTOR_POOL_SIZE);
            vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size);
            ctx->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
        }

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = device->dsl;
        }
        vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(ctx->descriptor_pools[pool_idx], alloc_count, layouts.data());
        std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
        ctx->descriptor_sets.insert(ctx->descriptor_sets.end(), sets.begin(), sets.end());

        pool_idx++;
    }
}
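
// Worked example of the growth policy above, assuming for illustration that
// VK_DEVICE_DESCRIPTOR_POOL_SIZE is 32: with 10 sets already allocated and 24
// required, needed = max(15, 24) = 24 and to_alloc = 14. Pool 0 still has
// 32 - 10 = 22 free slots, so all 14 sets come from it and no new pool is
// created; had to_alloc exceeded 22, the remainder would spill into a fresh
// pool on the next loop iteration.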

static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_command_pool& p) {
    VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");

    if (p.cmd_buffers.size() > p.cmd_buffer_idx) {
        // Reuse command buffer
        return p.cmd_buffers[p.cmd_buffer_idx++];
    }

    vk::CommandBufferAllocateInfo command_buffer_alloc_info(
        p.pool,
        vk::CommandBufferLevel::ePrimary,
        1);
    const std::vector<vk::CommandBuffer> cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info);
    auto buf = cmd_buffers.front();

    p.cmd_buffers.push_back(buf);
    p.cmd_buffer_idx++;

    return buf;
}

static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
    if (ctx->seqs.empty()) {
        if (fence) {
            std::lock_guard<std::mutex> guard(queue_mutex);
            ctx->p->q->queue.submit({}, fence);
        }
        return;
    }
    VK_LOG_DEBUG("ggml_vk_submit(" << ctx << ", " << fence << ")");

    std::vector<std::vector<uint64_t>> tl_wait_vals;
    std::vector<std::vector<uint64_t>> tl_signal_vals;
    std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
    std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
    std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
    std::vector<vk::SubmitInfo> submit_infos;
    int idx = -1;
    std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;

    size_t reserve = 0;

    for (const auto& sequence : ctx->seqs) {
        reserve += sequence.size();
    }

    // Pre-reserve vectors to prevent reallocation, which invalidates pointers
    tl_wait_semaphores.reserve(reserve);
    tl_wait_vals.reserve(reserve);
    tl_signal_semaphores.reserve(reserve);
    tl_signal_vals.reserve(reserve);
    tl_submit_infos.reserve(reserve);
    submit_infos.reserve(reserve);
    stage_flags.reserve(reserve);

    for (const auto& sequence : ctx->seqs) {
        for (const auto& submission : sequence) {
            stage_flags.push_back({});
            idx++;
            tl_wait_vals.push_back({});
            tl_wait_semaphores.push_back({});
            tl_signal_vals.push_back({});
            tl_signal_semaphores.push_back({});
            for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
                stage_flags[idx].push_back(ctx->p->q->stage_flags);
                tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
                tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
            }
            for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
                tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
                tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
            }
            tl_submit_infos.push_back({
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_vals[idx].data(),
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_vals[idx].data(),
            });
            tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
            tl_submit_infos[idx].pNext = nullptr;
            vk::SubmitInfo si{
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_semaphores[idx].data(),
                stage_flags[idx].data(),
                1,
                &submission.buffer,
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_semaphores[idx].data(),
            };
            si.setPNext(&tl_submit_infos[idx]);
            submit_infos.push_back(si);
        }
    }

    std::lock_guard<std::mutex> guard(queue_mutex);

    ctx->p->q->queue.submit(submit_infos, fence);

    ctx->seqs.clear();
}

static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
    VK_LOG_DEBUG("ggml_vk_find_queue_family_index()");
    const uint32_t qfsize = queue_family_props.size();

    // Try with avoid preferences first
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
            return i;
        }
    }

    // Fall back to only required
    for (size_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to reusing compute queue
    for (size_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to ignoring min_num_queues
    for (size_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations.
    // Thus, if the capabilities of a queue family include VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT, then reporting the VK_QUEUE_TRANSFER_BIT capability separately for that queue family is optional.
    if (compute_index >= 0) {
        return compute_index;
    }

    std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;

    for (auto &q_family : queue_family_props) {
        std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
    }
    abort();
}

static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags, bool transfer_only) {
    VK_LOG_DEBUG("ggml_vk_create_queue()");
    std::lock_guard<std::recursive_mutex> guard(device->mutex);

    q.queue_family_index = queue_family_index;
    q.transfer_only = transfer_only;

    q.cmd_pool.init(device, &q);

    q.queue = device->device.getQueue(queue_family_index, queue_index);

    q.stage_flags = stage_flags;
}

static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_command_pool& p) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_context(" << result << ")");
    ctx->gc.contexts.emplace_back(result);
    result->p = &p;
    return result;
}

static vk_context ggml_vk_create_temporary_context(vk_command_pool& p) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_temporary_context(" << result << ")");
    result->p = &p;
    return result;
}

static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_binary_semaphore()");
    vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
    vk::SemaphoreCreateInfo ci{};
    ci.setPNext(&tci);
    vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
    ctx->gc.semaphores.push_back({ semaphore, 0 });
    return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
}

static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
    if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
        vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
        vk::SemaphoreCreateInfo ci{};
        ci.setPNext(&tci);
        vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
        ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
    }
    return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
}

static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
    if (ctx->event_idx >= ctx->gc.events.size()) {
        ctx->gc.events.push_back(ctx->device->device.createEvent({}));
    }
    return ctx->gc.events[ctx->event_idx++];
}

static void ggml_vk_command_pool_cleanup(vk_device& device, vk_command_pool& p) {
    VK_LOG_DEBUG("ggml_vk_command_pool_cleanup()");

    // Requires command buffers to be done
    device->device.resetCommandPool(p.pool);
    p.cmd_buffer_idx = 0;
}

static void ggml_vk_queue_command_pools_cleanup(vk_device& device) {
    VK_LOG_DEBUG("ggml_vk_queue_command_pools_cleanup()");

    // Arbitrary frequency to cleanup/reuse command buffers
    static constexpr uint32_t cleanup_frequency = 10;

    if (device->compute_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) {
        ggml_vk_command_pool_cleanup(device, device->compute_queue.cmd_pool);
    }
    if (device->transfer_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) {
        ggml_vk_command_pool_cleanup(device, device->transfer_queue.cmd_pool);
    }
}

static std::vector<uint32_t> ggml_vk_find_memory_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    std::vector<uint32_t> indices;
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            indices.push_back(i);
        }
    }
    return indices;
}

static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std::initializer_list<vk::MemoryPropertyFlags> & req_flags_list) {
    VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags_list.begin()[0]) << ", " << to_string(req_flags_list.begin()[req_flags_list.size()-1]) << ")");
    if (size > device->max_buffer_size) {
        throw vk::OutOfDeviceMemoryError("Requested buffer size exceeds device buffer size limit");
    }

    vk_buffer buf = std::make_shared<vk_buffer_struct>();

    if (size == 0) {
        buf->size = 0;
        return buf;
    }

    vk::BufferUsageFlags usage_flags = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst;
    vk::MemoryAllocateFlags mem_flags {};
    if (device->buffer_device_address) {
        usage_flags |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
        mem_flags |= vk::MemoryAllocateFlagBits::eDeviceAddress;
    }

    vk::BufferCreateInfo buffer_create_info{
        vk::BufferCreateFlags(),
        size,
        usage_flags,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
    };

    buf->buffer = device->device.createBuffer(buffer_create_info);

    vk::MemoryRequirements mem_req = device->device.getBufferMemoryRequirements(buf->buffer);

    vk::PhysicalDeviceMemoryProperties mem_props = device->physical_device.getMemoryProperties();

    const vk::MemoryAllocateFlagsInfo mem_flags_info { mem_flags };

    for (auto it = req_flags_list.begin(); it != req_flags_list.end(); it++) {
        const auto & req_flags = *it;
        const std::vector<uint32_t> memory_type_indices = ggml_vk_find_memory_properties(&mem_props, &mem_req, req_flags);
        if (memory_type_indices.empty()) {
            continue;
        }
        buf->memory_property_flags = req_flags;
        bool done = false;
        for (auto mtype_it = memory_type_indices.begin(); mtype_it != memory_type_indices.end(); mtype_it++) {
            try {
                buf->device_memory = device->device.allocateMemory({ mem_req.size, *mtype_it, &mem_flags_info });
                done = true;
                break;
            } catch (const vk::SystemError& e) {
                // loop and retry
                // during last attempt throw the exception
                if (it + 1 == req_flags_list.end() && mtype_it + 1 == memory_type_indices.end()) {
                    device->device.destroyBuffer(buf->buffer);
                    throw e;
                }
            }
        }
        if (done) {
            break;
        }
    }

    if (!buf->device_memory) {
        device->device.destroyBuffer(buf->buffer);
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");
    }

    buf->ptr = nullptr;

    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = device->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

    device->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);

    buf->device = device;
    buf->size = size;

    if (device->buffer_device_address) {
        const vk::BufferDeviceAddressInfo addressInfo(buf->buffer);
        buf->bda_addr = device->device.getBufferAddress(addressInfo);
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    device->memory_logger->log_allocation(buf, size);
#endif

    return buf;
}

static vk_buffer ggml_vk_create_buffer_check(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(device, size, {req_flags, fallback_flags});
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
}

static vk_buffer ggml_vk_create_buffer_device(vk_device& device, size_t size) {
    vk_buffer buf;
    try {
        if (device->prefer_host_memory) {
            buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent,
                                                       vk::MemoryPropertyFlagBits::eDeviceLocal});
        } else if (device->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal,
                                                       vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});
        } else if (device->disable_host_visible_vidmem) {
            if (device->allow_sysmem_fallback) {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal,
                                                           vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});
            } else {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal});
            }
        } else {
            // Use ReBAR (host-visible device-local memory) if available, otherwise fall back to device-only memory
            if (device->allow_sysmem_fallback) {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal | vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent,
                                                           vk::MemoryPropertyFlagBits::eDeviceLocal,
                                                           vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});
            } else {
                buf = ggml_vk_create_buffer(device, size, {vk::MemoryPropertyFlagBits::eDeviceLocal | vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent,
                                                           vk::MemoryPropertyFlagBits::eDeviceLocal});
            }
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }

    return buf;
}

static void ggml_vk_destroy_buffer(vk_buffer& buf) {
    if (buf == nullptr) {
        return;
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    if (buf->device != nullptr) {
        buf->device->memory_logger->log_deallocation(buf);
    }
#endif

    buf.reset();
}

static vk_subbuffer ggml_vk_subbuffer(const ggml_backend_vk_context* ctx, const vk_buffer& buf, size_t offset = 0) {
    return { buf, offset, ggml_vk_get_max_buffer_range(ctx, buf, offset) };
}

static void ggml_vk_sync_buffers(ggml_backend_vk_context* ctx, vk_context& subctx) {
    VK_LOG_DEBUG("ggml_vk_sync_buffers()");

    const bool transfer_queue = subctx->p->q->transfer_only;

    if (ctx) {
        ctx->prealloc_x_need_sync = ctx->prealloc_y_need_sync = ctx->prealloc_split_k_need_sync = false;
    }

    subctx->s->buffer.pipelineBarrier(
        subctx->p->q->stage_flags,
        subctx->p->q->stage_flags,
        {},
        { {
          { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) },
          { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) }
        } },
        {},
        {}
    );
}

static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events) {
    VK_LOG_DEBUG("ggml_vk_wait_events()");
    if (events.empty()) {
        return;
    }

    ctx->s->buffer.waitEvents(
        events,
        ctx->p->q->stage_flags,
        ctx->p->q->stage_flags,
        {},
        {},
        {}
    );
}

// number of rows/cols for flash attention shader
static constexpr uint32_t flash_attention_num_small_rows = 32;
static constexpr uint32_t scalar_flash_attention_num_small_rows = 1;

static uint32_t get_fa_scalar_num_large_rows(uint32_t hsk, uint32_t hsv) {
    if (hsv >= 192) {
        return 2;
    } else if ((hsv | hsk) & 8) {
        return 4;
    } else {
        return 8;
    }
}
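
// In other words: very large value heads (HSV >= 192) get 2 rows; head sizes
// with bit 3 set (i.e. not a multiple of 16) get 4; everything else gets 8.
// For example, hsk = hsv = 128 yields 8 rows, while hsk = 72 yields 4.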

// The FA coopmat1 shader assumes 16x16x16 matrix multiply support.
// 128 threads split into four subgroups, each subgroup does 1/4
// of the Bc dimension.
static constexpr uint32_t coopmat1_flash_attention_num_large_rows = 16;
static constexpr uint32_t scalar_flash_attention_Bc = 64;
static constexpr uint32_t scalar_flash_attention_workgroup_size = 128;

static uint32_t get_fa_num_small_rows(FaCodePath path) {
    if (path == FA_COOPMAT2) {
        return flash_attention_num_small_rows;
    } else {
        return scalar_flash_attention_num_small_rows;
    }
}

static std::array<uint32_t, 2> fa_rows_cols(FaCodePath path, uint32_t hsk, uint32_t hsv, uint32_t clamp, ggml_type type, bool small_rows) {
    GGML_UNUSED(clamp);
    GGML_UNUSED(hsv);

    if (path == FA_SCALAR) {
        if (small_rows) {
            return {scalar_flash_attention_num_small_rows, 64};
        } else {
            if ((hsv | hsk) & 8) {
                // HSV/HSK not being a multiple of 16 makes D_split smaller, which makes cols_per_iter
                // larger, and Bc needs to be >= cols_per_thread. 64 is large enough, 32 is not.
                return {get_fa_scalar_num_large_rows(hsk, hsv), 64};
            } else {
                return {get_fa_scalar_num_large_rows(hsk, hsv), 32};
            }
        }
    }

    if (path == FA_COOPMAT1) {
        if (small_rows) {
            return {scalar_flash_attention_num_small_rows, scalar_flash_attention_Bc};
        } else {
            return {coopmat1_flash_attention_num_large_rows, scalar_flash_attention_Bc};
        }
    }

    // small rows, large cols
    if (small_rows) {
        return {get_fa_num_small_rows(FA_COOPMAT2), 32};
    }

    // small cols to reduce register count
    if (ggml_is_quantized(type) || hsk >= 256 || hsv >= 256) {
        if (hsk >= 512 || hsv >= 512) {
            return {32, 32};
        } else {
            return {64, 32};
        }
    }
    return {64, 64};
}
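
// Example resolutions: FA_SCALAR with hsk = hsv = 128 and small_rows = false
// returns {8, 32}; the FA_COOPMAT2 path with a quantized K/V type returns
// {64, 32}, unless HSK/HSV reach 512, which drops the tile to {32, 32}.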

static uint32_t fa_align(FaCodePath path, uint32_t hsk, uint32_t hsv, ggml_type type, bool small_rows) {
    return fa_rows_cols(path, hsk, hsv, 0, type, small_rows)[1];
}

static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vector<uint32_t>& warptile, bool mul_mat_id, ggml_type src0_type) {
    uint32_t lut_size = 0;
    switch (src0_type) {
    case GGML_TYPE_IQ1_S:
    case GGML_TYPE_IQ1_M:
        lut_size = 2*2048;
        break;
    case GGML_TYPE_IQ2_XXS:
        lut_size = 8*256;
        break;
    case GGML_TYPE_IQ2_XS:
        lut_size = 8*512;
        break;
    case GGML_TYPE_IQ2_S:
        lut_size = 8*1024;
        break;
    case GGML_TYPE_IQ3_XXS:
        lut_size = 4*256;
        break;
    case GGML_TYPE_IQ3_S:
        lut_size = 4*512;
        break;
    case GGML_TYPE_IQ4_NL:
    case GGML_TYPE_IQ4_XS:
    case GGML_TYPE_MXFP4:
        lut_size = 4*16;
        break;
    default:
        break;
    }

    // Needs to be kept up to date on shader changes
    const uint32_t bank_conflict_offset = device->coopmat_support ? 8 : 1;
    const uint32_t type_size = device->fp16 ? sizeof(ggml_fp16_t) : sizeof(float);
    const uint32_t warps = warptile[0] / warptile[10];

    const uint32_t load_bufs = (warptile[1] + warptile[2]) * (warptile[3] + bank_conflict_offset) * type_size;
    const uint32_t mmid_row_ids = mul_mat_id ? (warptile[2] * 2 * sizeof(uint16_t)) : 0;
    const uint32_t coopmat_stage = device->coopmat_support ? warptile[7] * warptile[8] / warps * sizeof(float) : 0;
    const uint32_t ballots_sh = mul_mat_id ? (warps * 4 * sizeof(uint32_t)) : 0;

    const uint32_t total_size = load_bufs + mmid_row_ids + coopmat_stage + lut_size + ballots_sh;
    const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;

    VK_LOG_DEBUG("ggml_vk_matmul_shmem_support(warptile=(" << warptile[0] << "," << warptile[1] << "," << warptile[2] << "), "
                 "mul_mat_id=" << mul_mat_id << ", src0_type=" << ggml_type_name(src0_type) << ", supported=" << supported);

    return supported;
}
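
// Rough numbers for intuition, using a hypothetical fp16 non-coopmat warptile
// with warptile[1] = warptile[2] = 128 and warptile[3] = 32, without
// mul_mat_id: load_bufs = (128 + 128) * (32 + 1) * 2 = 16896 bytes, and with
// no LUT, row-id, or coopmat staging that is the whole shared-memory
// footprint, comfortably below the 32 KiB or more that desktop GPUs typically
// report for maxComputeSharedMemorySize.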

struct GpuPipelineConfig {
    // GPU architecture identifier.
    // Example: vk_device_architecture::AMD_GCN
    vk_device_architecture arch;

    // Mapping of pipeline names to their specific subgroup sizes.
    // Example: {"soft_max_f32", 64}
    std::unordered_map<std::string, uint32_t> pipelines;

    // Default subgroup size for this GPU.
    // Defaults to 0 if not explicitly provided.
    uint32_t default_subgroup_size = 0;
};

// Pipeline configuration for RDNA1 GPUs.
static const std::unordered_map<std::string, uint32_t> rdna1_pipelines = {
    {"soft_max", 64}, {"im2col", 64},
    {"argmax", 64}, {"mul_mat_vec", 64},
    {"mul_mat_vec_f16", 32}, {"mul_mat_vec_f32_f16", 32}
};

// Pipeline configuration for RDNA2 GPUs.
static const std::unordered_map<std::string, uint32_t> rdna2_pipelines = {
    {"soft_max", 64}, {"im2col", 64},
};

static constexpr uint32_t RDNA_DEFAULT_SUBGROUP_SIZE = 32;

// Define configurations for different GPUs.
static std::vector<GpuPipelineConfig> gpu_pipeline_configs = {
    {
        vk_device_architecture::AMD_RDNA1,
        {
            rdna1_pipelines,
        },
        RDNA_DEFAULT_SUBGROUP_SIZE
    },
    {
        vk_device_architecture::AMD_RDNA2,
        {
            rdna2_pipelines,
        },
        RDNA_DEFAULT_SUBGROUP_SIZE
    },
};

static uint32_t get_subgroup_size(const std::string &pipeline_name, const vk_device_architecture &arch) {
    for (const auto &config : gpu_pipeline_configs) {
        if (config.arch == arch) {
            auto pipIt = config.pipelines.find(pipeline_name);
            if (pipIt != config.pipelines.end()) {
                return pipIt->second;
            }
            std::vector<std::pair<std::string, uint32_t>> sorted_pipelines(config.pipelines.begin(), config.pipelines.end());
            std::sort(sorted_pipelines.begin(), sorted_pipelines.end(),
                      [](const auto &a, const auto &b) { return a.first.size() > b.first.size(); });
            for (const auto &entry : sorted_pipelines) {
                if (pipeline_name.find(entry.first) != std::string::npos) {
                    return entry.second;
                }
            }
            return config.default_subgroup_size;
        }
    }
    return 0; // If no matching configuration is found
}
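
// Matching is longest-key-wins by substring: on RDNA1, a hypothetical name
// like "mul_mat_vec_f16_q8_0" misses the exact lookup but contains the key
// "mul_mat_vec_f16" (checked before the shorter "mul_mat_vec" because the
// candidates are sorted by descending length), so it resolves to 32, while a
// plain "mul_mat_vec_q4_0" only matches "mul_mat_vec" and gets 64.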
  2305. static void ggml_vk_load_shaders(vk_device& device) {
  2306. VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")");
  2307. std::lock_guard<std::recursive_mutex> guard(device->mutex);
  2308. // some shaders have a minimum subgroup size
  2309. const uint32_t subgroup_size_8 = std::max(device->subgroup_size, 8u);
  2310. const uint32_t subgroup_size_16 = std::max(device->subgroup_size, 16u);
  2311. const uint32_t subgroup_size_32 = std::max(device->subgroup_size, 32u);
  2312. const uint32_t mul_mat_subgroup_size = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control) ? device->subgroup_min_size : device->subgroup_size;
  2313. const uint32_t mul_mat_subgroup_size_8 = std::max(mul_mat_subgroup_size, 8u);
  2314. const uint32_t mul_mat_subgroup_size_16 = std::max(mul_mat_subgroup_size, 16u);
  2315. const uint32_t mul_mat_subgroup_size_32 = std::max(mul_mat_subgroup_size, 32u);
  2316. const bool subgroup_min_size_16 = (!device->subgroup_size_control && device->subgroup_size >= 16) ||
  2317. (device->subgroup_size_control && device->subgroup_max_size >= 16);

    // mulmat
    std::vector<uint32_t> l_warptile, m_warptile, s_warptile,
                          l_warptile_id, m_warptile_id, s_warptile_id,
                          l_warptile_mmq, m_warptile_mmq, s_warptile_mmq,
                          l_warptile_mmq_int, m_warptile_mmq_int, s_warptile_mmq_int,
                          l_warptile_mmq_int_k, m_warptile_mmq_int_k, s_warptile_mmq_int_k,
                          l_warptile_mmq_k, m_warptile_mmq_k, s_warptile_mmq_k,
                          l_warptile_mmqid, m_warptile_mmqid, s_warptile_mmqid,
                          l_warptile_mmqid_int, m_warptile_mmqid_int, s_warptile_mmqid_int,
                          l_warptile_mmqid_int_k, m_warptile_mmqid_int_k, s_warptile_mmqid_int_k;
    std::array<uint32_t, 3> l_wg_denoms, m_wg_denoms, s_wg_denoms,
                            l_mmq_wg_denoms, m_mmq_wg_denoms, s_mmq_wg_denoms,
                            l_mmq_wg_denoms_k, m_mmq_wg_denoms_k, s_mmq_wg_denoms_k,
                            l_mmqid_wg_denoms, m_mmqid_wg_denoms, s_mmqid_wg_denoms;

    uint32_t l_align, m_align, s_align;

    if (device->coopmat2) {
        // spec constants and tile sizes for non-quant matmul/matmul_id
        l_warptile = { 256, 128, 256, 64, 1 };
        m_warptile = { 256, 128, 128, 64, 0 };
        s_warptile = { 128, 64, 64, 64, 0 };
        l_wg_denoms = { 128, 256, 1 };
        m_wg_denoms = { 128, 128, 1 };
        s_wg_denoms = { 64, 64, 1 };

        // spec constants and tile sizes for quant matmul (non-Qi_K)
        l_warptile_mmq = { 256, 128, 256, 64, 1 };
        m_warptile_mmq = { 256, 128, 128, 64, 1 };
        s_warptile_mmq = { 256, 32, 64, 128, 0 };
        l_mmq_wg_denoms = { 128, 256, 1 };
        m_mmq_wg_denoms = { 128, 128, 1 };
        s_mmq_wg_denoms = { 32, 64, 1 };

        // spec constants and tile sizes for quant matmul (Qi_K)
        l_warptile_mmq_k = { 256, 128, 256, 64, 1 };
        m_warptile_mmq_k = { 256, 128, 128, 64, 1 };
        s_warptile_mmq_k = { 256, 32, 64, 128, 0 };
        l_mmq_wg_denoms_k = { 128, 256, 1 };
        m_mmq_wg_denoms_k = { 128, 128, 1 };
        s_mmq_wg_denoms_k = { 32, 64, 1 };

        // spec constants and tile sizes for quant matmul_id
        l_warptile_mmqid = { 256, 128, 128, 16, 1, device->subgroup_size };
        m_warptile_mmqid = { 256, 128, 64, 16, 0, device->subgroup_size };
        s_warptile_mmqid = { 256, 128, 64, 16, 0, device->subgroup_size };
        l_mmqid_wg_denoms = { 128, 128, 1 };
        m_mmqid_wg_denoms = { 128, 64, 1 };
        s_mmqid_wg_denoms = { 128, 64, 1 };

        l_align = 128;
        m_align = 64;
        s_align = 32;
    } else {
        // Matrix cores require different warp group sizes
        const uint32_t tm_l = device->coopmat_support ? device->coopmat_m : 4;
        const uint32_t tm_m = device->coopmat_support ? device->coopmat_m : 4;
        const uint32_t tm_s = device->coopmat_support ? device->coopmat_m : 2;
        const uint32_t tn_l = device->coopmat_support ? device->coopmat_n : 4;
        const uint32_t tn_m = device->coopmat_support ? device->coopmat_n : 2;
        const uint32_t tn_s = device->coopmat_support ? device->coopmat_n : 2;
        const uint32_t tk_l = device->coopmat_support ? device->coopmat_k : 1;
        const uint32_t tk_m = device->coopmat_support ? device->coopmat_k : 1;
        const uint32_t tk_s = device->coopmat_support ? device->coopmat_k : 1;
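
        // Annotation (best-effort reading of the 11-entry warptile vectors below, inferred
        // from the shader spec constants and from the "setting WMITER to 1" comment):
        // { workgroup size, BM, BN, BK, WM, WN, WMITER, TM, TN, TK, subgroup size },
        // i.e. workgroup-level block tile, per-warp tile, the WMITER iteration count,
        // and the per-thread (or coopmat) tile sizes tm/tn/tk.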
        l_warptile = { 128, 128, 128, 16, subgroup_size_8 * 2, 64, 2, tm_l, tn_l, tk_l, subgroup_size_8 };
        m_warptile = { 128, 64, 64, 16, subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
        s_warptile = { subgroup_size_16, 32, 32, 16, 32, 32, 2, tm_s, tn_s, tk_s, subgroup_size_8 };

        l_warptile_mmq = { 128, 128, 128, 32, subgroup_size_8 * 2, 64, 2, tm_l, tn_l, tk_l, subgroup_size_8 };
        m_warptile_mmq = { 128, 64, 64, 32, subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, subgroup_size_8 };
        s_warptile_mmq = { subgroup_size_32, 32, 32, 32, 32, 32, 2, tm_s, tn_s, tk_s, subgroup_size_8 };

        // Integer MMQ has a smaller shared memory profile, but heavier register use
        l_warptile_mmq_int = { 128, 128, 128, 32, subgroup_size_8 * 2, 64, 2, 4, 4, 1, subgroup_size_8 };
        m_warptile_mmq_int = { 128, 64, 64, 32, subgroup_size_8, 32, 2, 2, 2, 1, subgroup_size_8 };
        s_warptile_mmq_int = { subgroup_size_32, 32, 32, 32, 32, 32, 2, 2, 1, 1, subgroup_size_8 };

        // K-quants use even more registers, mitigate by setting WMITER to 1
        l_warptile_mmq_int_k = { 128, 128, 128, 32, subgroup_size_8 * 2, 64, 1, 4, 4, 1, subgroup_size_8 };
        m_warptile_mmq_int_k = { 128, 64, 64, 32, subgroup_size_8, 32, 1, 2, 2, 1, subgroup_size_8 };
        s_warptile_mmq_int_k = { subgroup_size_32, 32, 32, 32, 32, 32, 1, 2, 1, 1, subgroup_size_8 };

        l_warptile_id = { 128, 128, 128, 16, mul_mat_subgroup_size_16 * 2, 64, 2, tm_l, tn_l, tk_l, mul_mat_subgroup_size_16 };
        m_warptile_id = { 128, 64, 64, 16, mul_mat_subgroup_size_16, 32, 2, tm_m, tn_m, tk_m, mul_mat_subgroup_size_16 };
        s_warptile_id = { mul_mat_subgroup_size_16, 32, 32, 16, 32, 32, 2, tm_s, tn_s, tk_s, mul_mat_subgroup_size_16 };

        l_warptile_mmqid = { 128, 128, 128, 32, mul_mat_subgroup_size_8 * 2, 64, 2, tm_l, tn_l, tk_l, mul_mat_subgroup_size_8 };
        m_warptile_mmqid = { 128, 64, 64, 32, mul_mat_subgroup_size_8, 32, 2, tm_m, tn_m, tk_m, mul_mat_subgroup_size_8 };
        s_warptile_mmqid = { mul_mat_subgroup_size_32, 32, 32, 32, 32, 32, 2, tm_s, tn_s, tk_s, mul_mat_subgroup_size_8 };

        l_warptile_mmqid_int = { 128, 128, 128, 32, mul_mat_subgroup_size_8 * 2, 64, 2, 4, 4, 1, mul_mat_subgroup_size_8 };
        m_warptile_mmqid_int = { 128, 64, 64, 32, mul_mat_subgroup_size_8, 32, 2, 2, 2, 1, mul_mat_subgroup_size_8 };
        s_warptile_mmqid_int = { mul_mat_subgroup_size_32, 32, 32, 32, 32, 32, 2, 2, 1, 1, mul_mat_subgroup_size_8 };

        l_warptile_mmqid_int_k = { 128, 128, 128, 32, mul_mat_subgroup_size_16 * 2, 64, 1, 4, 4, 1, mul_mat_subgroup_size_16 };
        m_warptile_mmqid_int_k = { 128, 64, 64, 32, mul_mat_subgroup_size_16, 32, 1, 2, 2, 1, mul_mat_subgroup_size_16 };
        s_warptile_mmqid_int_k = { mul_mat_subgroup_size_32, 32, 32, 32, 32, 32, 1, 2, 1, 1, mul_mat_subgroup_size_16 };

        // chip specific tuning
        if ((device->architecture == AMD_GCN) && (device->driver_id != vk::DriverId::eAmdProprietary)) {
            m_warptile_mmq = m_warptile_mmq_int = { 256, 64, 64, 32, 16, 16, 2, 2, 2, 1, 16 };
            m_warptile_mmqid = m_warptile_mmqid_int = { 256, 64, 64, 32, 16, 16, 2, 2, 2, 1, 16 };
        }

        l_mmq_wg_denoms = l_wg_denoms = { 128, 128, 1 };
        m_mmq_wg_denoms = m_wg_denoms = { 64, 64, 1 };
        s_mmq_wg_denoms = s_wg_denoms = { 32, 32, 1 };

        l_align = 128;
        m_align = 64;
        s_align = 32;

        for (uint32_t i = 0; i < GGML_TYPE_COUNT; ++i) {
            ggml_type t = (ggml_type)i;
            // Disable medium and large matrix multiplication if not enough shared memory is available
            // Check mmq warptiles as the largest configuration
            // Throw an error if not enough shared memory for any matrix multiplication is available
            if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmq, false, t)) {
                std::cerr << "ggml_vulkan: Error: Shared memory size too small for matrix multiplication." << std::endl;
                throw std::runtime_error("Shared memory size too small for matrix multiplication.");
            } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmq, false, t)) {
                device->mul_mat_m[i] = false;
                device->mul_mat_l[i] = false;
            } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmq, false, t)) {
                device->mul_mat_l[i] = false;
            }

            // Disable mul_mat_id if not enough shared memory is available
            if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmqid, true, t)) {
                device->mul_mat_id_s[i] = false;
                device->mul_mat_id_m[i] = false;
                device->mul_mat_id_l[i] = false;
            } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmqid, true, t)) {
                device->mul_mat_id_m[i] = false;
                device->mul_mat_id_l[i] = false;
            } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmqid, true, t)) {
                device->mul_mat_id_l[i] = false;
            }
        }
    }

    if (!device->pipeline_matmul_f32) {
        device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_f32_f16) {
        device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_id_f32) {
        device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_bf16) {
        device->pipeline_matmul_bf16 = std::make_shared<vk_matmul_pipeline_struct>();
    }
    if (!device->pipeline_matmul_id_bf16) {
        device->pipeline_matmul_id_bf16 = std::make_shared<vk_matmul_pipeline_struct>();
    }
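
    // Annotation (hedged reading of the code): these five are bare vk_matmul_pipeline
    // handles that the CREATE_MM* macros below dereference unconditionally, so they are
    // allocated up front here rather than lazily inside ggml_vk_create_pipeline.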

    std::vector<std::future<void>> compiles;
    auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const char *name, size_t spv_size, const void* spv_data, const char *entrypoint,
                                              uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
                                              uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {

        if (!require_full_subgroups && required_subgroup_size == 0) {
            required_subgroup_size = get_subgroup_size(name, device->architecture);
        }

        if (!pipeline) {
            pipeline = std::make_shared<vk_pipeline_struct>();
        }
        if (!pipeline->initialized) {
            pipeline->name = name;
            pipeline->parameter_count = parameter_count;
            pipeline->push_constant_size = push_constant_size;
            pipeline->wg_denoms = wg_denoms;
            pipeline->align = align;
            pipeline->initialized = true;
        }

        if (!pipeline->needed || pipeline->compiled) {
            return;
        }

        // TODO: We're no longer benefitting from the async compiles (shaders are
        // compiled individually, as needed) and this complexity can be removed.
        {
            // wait until fewer than N compiles are in progress
            uint32_t N = std::max(1u, std::thread::hardware_concurrency());
            std::unique_lock<std::mutex> guard(compile_count_mutex);
            while (compile_count >= N) {
                compile_count_cond.wait(guard);
            }
            compile_count++;
        }
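
        // Annotation: compile_count is presumably decremented (and compile_count_cond
        // notified) by ggml_vk_create_pipeline_func when the async compile finishes, so
        // the block above acts as a counting semaphore bounding concurrent compiles to
        // the hardware thread count.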
        compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), spv_size, spv_data, entrypoint,
                                      parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
    };

    auto const &ggml_vk_create_pipeline2 = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const char *entrypoint,
                                               uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
                                               uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {
        return ggml_vk_create_pipeline(device, pipeline, name.c_str(), spv_size, spv_data, entrypoint,
                                       parameter_count, push_constant_size, wg_denoms, specialization_constants,
                                       align, disable_robustness, require_full_subgroups, required_subgroup_size);
    };

    auto const &fa_wg_denoms = [&](FaCodePath path, uint32_t hsk, uint32_t hsv, uint32_t clamp, ggml_type type, bool small_rows) -> std::array<uint32_t, 3> {
        return {fa_rows_cols(path, hsk, hsv, clamp, type, small_rows)[0], 1, 1};
    };

    auto const &fa_spec_constants = [&](FaCodePath path, uint32_t hsk, uint32_t hsv, uint32_t clamp, ggml_type type, bool small_rows) -> std::vector<uint32_t> {
        // For large number of rows, 128 invocations seems to work best.
        // For small number of rows (e.g. N==1), 256 works better. But matrix granularity
        // for 256 is 32, so we can't use 256 for D==80.
        // For scalar, use 128 (arbitrary).
        // The same D_split value is used for both HSK and HSV, so just base it on the union of the LSBs.
        const uint32_t D = (hsk|hsv);
        uint32_t wg_size = (path == FA_SCALAR || path == FA_COOPMAT1)
            ? scalar_flash_attention_workgroup_size
            : ((small_rows && (D % 32) == 0) ? 256 : 128);
        auto rows_cols = fa_rows_cols(path, hsk, hsv, clamp, type, small_rows);

        // D_split can't be larger than a subgroup because we use subgroupShuffle to reduce it.
        // D_split can't be larger than the LSB of D divided by 4 due to vectorization in the shader.
        const uint32_t D_lsb = D ^ (D & (D-1));
        uint32_t D_split = std::min(std::min(device->subgroup_size, 8u), D_lsb / 4);
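        // Worked example (illustrative, assuming subgroup_size >= 8): HSK=HSV=128 gives
        // D=128 and D_lsb=128, so D_split = min(min(subgroup_size, 8), 32) = 8;
        // HSK=HSV=80 gives D_lsb=16, so D_split = min(8, 4) = 4.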
        return {wg_size, rows_cols[0], rows_cols[1], hsk, hsv, clamp, D_split};
    };

#define CREATE_FA(TYPE, NAMELC, FAPATH, SUFFIX) \
    for (auto &fa : device->pipeline_flash_attn_f32_f16[TYPE]) { \
        uint32_t HSK = fa.first.HSK; \
        uint32_t HSV = fa.first.HSV; \
        bool small_rows = fa.first.small_rows; \
        FaCodePath path = fa.first.path; \
        bool aligned = fa.first.aligned; \
        bool f32acc = fa.first.f32acc; \
        if (path == FAPATH) { \
            if (aligned) { \
                if (f32acc) { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_align(FAPATH,HSK,HSV,TYPE,small_rows), true, true, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } else { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows), fa_align(FAPATH,HSK,HSV,TYPE,small_rows), true, true, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } \
            } else { \
                if (f32acc) { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows), 1, true, true, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } else { \
                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows), 1, true, true, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
                } \
            } \
        } \
    }
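
    // Annotation: in the macro above, the aligned variants are built with clamp == 0
    // (rows assumed fully in-bounds) and a real fa_align value, while the unaligned
    // variants use clamp == 1 and an alignment of 1, trading bounds handling in the
    // shader for generality.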
    CREATE_FA(GGML_TYPE_F32, f32, FA_SCALAR, )
    CREATE_FA(GGML_TYPE_F16, f16, FA_SCALAR, )
    CREATE_FA(GGML_TYPE_Q4_0, q4_0, FA_SCALAR, )
    CREATE_FA(GGML_TYPE_Q8_0, q8_0, FA_SCALAR, )
#if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    if (device->coopmat1_fa_support) {
        CREATE_FA(GGML_TYPE_F32, f32, FA_COOPMAT1, _cm1)
        CREATE_FA(GGML_TYPE_F16, f16, FA_COOPMAT1, _cm1)
        CREATE_FA(GGML_TYPE_Q4_0, q4_0, FA_COOPMAT1, _cm1)
        CREATE_FA(GGML_TYPE_Q8_0, q8_0, FA_COOPMAT1, _cm1)
    }
#endif
#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
    if (device->coopmat2) {
        CREATE_FA(GGML_TYPE_F32, f32, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_F16, f16, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q4_0, q4_0, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q4_1, q4_1, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q5_0, q5_0, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q5_1, q5_1, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_Q8_0, q8_0, FA_COOPMAT2, _cm2)
        CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl, FA_COOPMAT2, _cm2)
    }
#endif
#undef CREATE_FA

#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
    if (device->coopmat2) {

        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \

        // Create 2 variants, {f16,f32} accumulator
#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
        CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
        CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \

        CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3)
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3)
        }
#endif
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_0], matmul_q4_0_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_1], matmul_q4_1_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_0], matmul_q5_0_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_1], matmul_q5_1_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q8_0], matmul_q8_0_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q2_K], matmul_q2_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q3_K], matmul_q3_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_K], matmul_q4_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_K], matmul_q5_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q6_K], matmul_q6_k_f16, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ1_S], matmul_iq1_s_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ1_M], matmul_iq1_m_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_XXS], matmul_iq2_xxs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_XS], matmul_iq2_xs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_S], matmul_iq2_s_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_XXS], matmul_iq3_xxs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_S], matmul_iq3_s_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_XS], matmul_iq4_xs_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL], matmul_iq4_nl_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_MXFP4], matmul_mxfp4_f16, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)

        GGML_ASSERT(device->subgroup_ballot);
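        // Annotation: the matmul_id_subgroup_* shader variants below rely on subgroup
        // ballot operations (presumably for the expert-row scan); the assert documents
        // the assumption that any device taking the coopmat2 path reports subgroup_ballot.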
        CREATE_MM2(pipeline_matmul_id_f16, matmul_id_subgroup_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
        }
#endif
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_subgroup_iq1_s_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_subgroup_iq1_m_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_subgroup_iq2_xxs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_subgroup_iq2_xs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_subgroup_iq2_s_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_subgroup_iq3_xxs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_subgroup_iq3_s_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_subgroup_iq4_xs_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_subgroup_iq4_nl_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
        CREATE_MM2(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_f16, mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
#undef CREATE_MM
#undef CREATE_MM2
    } else
#endif // defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
#if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    if (device->coopmat_support) {

        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(TYPE, PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm1_len, NAMELC ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, true); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _cm1_len, NAMELC ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, true); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _cm1_len, NAMELC ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, true); \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _cm1_len, NAMELC ## _aligned ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, true); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _cm1_len, NAMELC ## _aligned ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, true); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _cm1_len, NAMELC ## _aligned ## F16ACC ## _cm1_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, true); \

        // Create 2 variants, {f16,f32} accumulator
#define CREATE_MM2(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        if (device->coopmat_acc_f16_support) { \
            CREATE_MM(TYPE, PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        } \
        if (device->coopmat_acc_f32_support) { \
            CREATE_MM(TYPE, PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
        } \

        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, )
        }
#endif

        if (device->coopmat_acc_f16_support) {
            CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0], matmul_q4_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1], matmul_q4_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0], matmul_q5_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1], matmul_q5_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0], matmul_q8_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K], matmul_q2_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K], matmul_q3_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K], matmul_q4_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K], matmul_q5_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K], matmul_q6_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S], matmul_iq1_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M], matmul_iq1_m_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS], matmul_iq2_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS], matmul_iq2_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S], matmul_iq2_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS], matmul_iq3_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S], matmul_iq3_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS], matmul_iq4_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL], matmul_iq4_nl_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4], matmul_mxfp4_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
        } else {
            CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S].f32acc, matmul_iq1_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M].f32acc, matmul_iq1_m_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f32acc, matmul_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f32acc, matmul_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f32acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f32acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f32acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f32acc, matmul_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
            CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4].f32acc, matmul_mxfp4_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
        }

        GGML_ASSERT(device->subgroup_ballot);

        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_subgroup_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16, matmul_id_subgroup_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16_f32, matmul_id_subgroup_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        if (device->coopmat_bf16_support) {
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
        }
#endif
        CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_subgroup_iq1_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_subgroup_iq1_m_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_subgroup_iq2_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_subgroup_iq2_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_subgroup_iq2_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_subgroup_iq3_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_subgroup_iq3_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_subgroup_iq4_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_subgroup_iq4_nl_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
        CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
#undef CREATE_MM2
#undef CREATE_MM
    } else
#endif // defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    if (device->fp16) {

        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(TYPE, PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _l[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _m[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        if (device->mul_mat ## ID ## _s[TYPE]) \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \

#define CREATE_MMQ(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        if (device->mul_mat ## ID ## _l[TYPE]) { \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME .f32acc->l, #NAMELC "_l", NAMELC ## _len, NAMELC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        } \
        if (device->mul_mat ## ID ## _m[TYPE]) { \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME .f32acc->m, #NAMELC "_m", NAMELC ## _len, NAMELC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        } \
        if (device->mul_mat ## ID ## _s[TYPE]) { \
            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME .f32acc->s, #NAMELC "_s", NAMELC ## _len, NAMELC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
        } \

        // Create 2 variants, {f16,f32} accumulator
#define CREATE_MM2(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        CREATE_MM(TYPE, PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
        CREATE_MM(TYPE, PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \

        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0], matmul_q4_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1], matmul_q4_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0], matmul_q5_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1], matmul_q5_1_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0], matmul_q8_0_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K], matmul_q2_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K], matmul_q3_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K], matmul_q4_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K], matmul_q5_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K], matmul_q6_k_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S], matmul_iq1_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M], matmul_iq1_m_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS], matmul_iq2_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS], matmul_iq2_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S], matmul_iq2_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS], matmul_iq3_xxs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S], matmul_iq3_s_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS], matmul_iq4_xs_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL], matmul_iq4_nl_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4], matmul_mxfp4_f32, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
        if (device->integer_dot_product) {
            CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_0], matmul_q4_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_1], matmul_q4_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_0], matmul_q5_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_1], matmul_q5_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q8_0], matmul_q8_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_MXFP4], matmul_mxfp4_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q2_K], matmul_q2_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q3_K], matmul_q3_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_K], matmul_q4_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_K], matmul_q5_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
            CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q6_K], matmul_q6_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, , 0);
        }
#endif

        if (device->subgroup_ballot && device->subgroup_require_full_support && subgroup_min_size_16) {
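            // The subgroup-optimized matmul_id variants are only used when full subgroups
            // can be required and a subgroup size of at least 16 is guaranteed; otherwise
            // the else branch below falls back to the plain matmul_id shaders.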
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_subgroup_f32_f32, , wg_denoms, warptile_id, vk_mat_mat_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16, matmul_id_subgroup_f16, wg_denoms, warptile_id, vk_mat_mat_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16_f32, matmul_id_subgroup_f16_f32, wg_denoms, warptile_id, vk_mat_mat_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_subgroup_iq1_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_subgroup_iq1_m_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_subgroup_iq2_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_subgroup_iq2_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_subgroup_iq2_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_subgroup_iq3_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_subgroup_iq3_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_subgroup_iq4_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_subgroup_iq4_nl_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            if (device->integer_dot_product) {
                CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_0], matmul_id_subgroup_q4_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_1], matmul_id_subgroup_q4_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_0], matmul_id_subgroup_q5_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_1], matmul_id_subgroup_q5_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q8_0], matmul_id_subgroup_q8_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_MXFP4], matmul_id_subgroup_mxfp4_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
                CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q2_K], matmul_id_subgroup_q2_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q3_K], matmul_id_subgroup_q3_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_K], matmul_id_subgroup_q4_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_K], matmul_id_subgroup_q5_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
                CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q6_K], matmul_id_subgroup_q6_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            }
#endif
        } else {
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_F16, pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_q4_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_q4_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_q5_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_q5_1_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_q8_0_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_q2_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_q3_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_q4_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_q5_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_q6_k_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S], matmul_id_iq1_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M], matmul_id_iq1_m_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS], matmul_id_iq2_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS], matmul_id_iq2_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S], matmul_id_iq2_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS], matmul_id_iq3_xxs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S], matmul_id_iq3_s_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS], matmul_id_iq4_xs_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_iq4_nl_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM2(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4], matmul_id_mxfp4_f32, mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            if (device->integer_dot_product) {
                CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_0], matmul_id_q4_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_1], matmul_id_q4_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_0], matmul_id_q5_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_1], matmul_id_q5_1_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q8_0], matmul_id_q8_0_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_MXFP4], matmul_id_mxfp4_q8_1, mmq_wg_denoms, warptile_mmqid_int, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q2_K], matmul_id_q2_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q3_K], matmul_id_q3_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q4_K], matmul_id_q4_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q5_K], matmul_id_q5_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
                CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id_q8_1[GGML_TYPE_Q6_K], matmul_id_q6_k_q8_1, mmq_wg_denoms, warptile_mmqid_int_k, vk_mat_mat_id_push_constants, 4, _id, 0);
            }
#endif
        }
#undef CREATE_MM2
#undef CREATE_MMQ
#undef CREATE_MM
    } else {
        // Create 6 variants, {s,m,l}x{unaligned,aligned}
#define CREATE_MM(TYPE, PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID, REQSUBGROUPSIZE) \
    if (device->mul_mat ## ID ## _l[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
    if (device->mul_mat ## ID ## _m[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
    if (device->mul_mat ## ID ## _s[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
    if (device->mul_mat ## ID ## _l[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
    if (device->mul_mat ## ID ## _m[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE); \
    if (device->mul_mat ## ID ## _s[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, REQSUBGROUPSIZE > 0, REQSUBGROUPSIZE);
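
// For illustration: a call such as
//     CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
// expands mechanically (the empty ID/F16ACC arguments drop out of the token pastes) to six
// guarded pipeline creations, the first of which reads:
//     if (device->mul_mat_l[GGML_TYPE_F32])
//         ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->l, "matmul_f32_f32_l",
//                                 matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3,
//                                 sizeof(vk_mat_mat_push_constants), l_wg_denoms, l_warptile, 1, false, 0 > 0, 0);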
#define CREATE_MMQ(TYPE, PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
    if (device->mul_mat ## ID ## _l[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC "_l", NAMELC ## _fp32_len, NAMELC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
    if (device->mul_mat ## ID ## _m[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC "_m", NAMELC ## _fp32_len, NAMELC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
    if (device->mul_mat ## ID ## _s[TYPE]) \
        ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC "_s", NAMELC ## _fp32_len, NAMELC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1);
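
// Note: unlike CREATE_MM above, this q8_1 variant only builds the three unaligned l/m/s
// pipelines; there are no _aligned a_l/a_m/a_s counterparts on this path.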
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F32, pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F16, pipeline_matmul_f16.f32acc, matmul_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_F16, pipeline_matmul_f16_f32.f32acc, matmul_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_S].f32acc, matmul_iq1_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ1_M].f32acc, matmul_iq1_m_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f32acc, matmul_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f32acc, matmul_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f32acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f32acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f32acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f32acc, matmul_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat[GGML_TYPE_MXFP4].f32acc, matmul_mxfp4_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, , 0);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
        if (device->integer_dot_product) {
            CREATE_MMQ(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_q8_1, mmq_wg_denoms, warptile_mmq_int, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
            CREATE_MMQ(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_q8_1[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_q8_1, mmq_wg_denoms, warptile_mmq_int_k, vk_mat_mat_push_constants, 3, );
        }
#endif
        if (device->subgroup_ballot && device->subgroup_require_full_support && subgroup_min_size_16) {
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_subgroup_f32_f32, , wg_denoms, warptile_id, vk_mat_mat_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16.f32acc, matmul_id_subgroup_f16, , wg_denoms, warptile_id, vk_mat_mat_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16_f32.f32acc, matmul_id_subgroup_f16_f32, , wg_denoms, warptile_id, vk_mat_mat_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_subgroup_bf16, , wg_denoms, warptile_id, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size_16);
            CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f32acc, matmul_id_subgroup_q4_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f32acc, matmul_id_subgroup_q4_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f32acc, matmul_id_subgroup_q5_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f32acc, matmul_id_subgroup_q5_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f32acc, matmul_id_subgroup_q8_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f32acc, matmul_id_subgroup_q2_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f32acc, matmul_id_subgroup_q3_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_subgroup_q4_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_subgroup_q5_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_subgroup_q6_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S].f32acc, matmul_id_subgroup_iq1_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M].f32acc, matmul_id_subgroup_iq1_m_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f32acc, matmul_id_subgroup_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f32acc, matmul_id_subgroup_iq2_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f32acc, matmul_id_subgroup_iq2_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f32acc, matmul_id_subgroup_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f32acc, matmul_id_subgroup_iq3_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f32acc, matmul_id_subgroup_iq4_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_subgroup_iq4_nl_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
            CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4].f32acc, matmul_id_subgroup_mxfp4_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, mul_mat_subgroup_size);
        } else {
            CREATE_MM(GGML_TYPE_F32, pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16.f32acc, matmul_id_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_F16, pipeline_matmul_id_f16_f32.f32acc, matmul_id_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q4_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f32acc, matmul_id_q4_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q4_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f32acc, matmul_id_q4_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q5_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f32acc, matmul_id_q5_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q5_1, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f32acc, matmul_id_q5_1_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q8_0, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f32acc, matmul_id_q8_0_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q2_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f32acc, matmul_id_q2_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q3_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f32acc, matmul_id_q3_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q4_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_q4_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q5_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_q5_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_Q6_K, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_q6_k_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ1_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_S].f32acc, matmul_id_iq1_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ1_M, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ1_M].f32acc, matmul_id_iq1_m_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ2_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f32acc, matmul_id_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ2_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f32acc, matmul_id_iq2_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ2_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f32acc, matmul_id_iq2_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ3_XXS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f32acc, matmul_id_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ3_S, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f32acc, matmul_id_iq3_s_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ4_XS, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f32acc, matmul_id_iq4_xs_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_IQ4_NL, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
            CREATE_MM(GGML_TYPE_MXFP4, pipeline_dequant_mul_mat_mat_id[GGML_TYPE_MXFP4].f32acc, matmul_id_mxfp4_f32, , mmq_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4, _id, 0);
        }
    }
    // reusing CREATE_MM from the fp32 path
    if ((device->coopmat2 || device->coopmat_support)
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        && !device->coopmat_bf16_support
#endif
    ) {
        // use scalar tile sizes
        l_warptile = { 128, 128, 128, 16, subgroup_size_8 * 2, 64, 2, 4, 4, 1, subgroup_size_8 };
        m_warptile = { 128,  64,  64, 16, subgroup_size_8, 32, 2, 4, 2, 1, subgroup_size_8 };
        s_warptile = { subgroup_size_16, 32, 32, 16, 32, 32, 2, 2, 2, 1, subgroup_size_8 };
        l_wg_denoms = {128, 128, 1 };
        m_wg_denoms = { 64,  64, 1 };
        s_wg_denoms = { 32,  32, 1 };
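
        // These vectors are forwarded verbatim as the matmul shaders' specialization
        // constants (workgroup size plus block-tile shape parameters); the exact slot
        // meanings are owned by the shader source, so treat the layout as opaque tuning data.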
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_bf16, matmul_bf16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, , 0);
        CREATE_MM(GGML_TYPE_BF16, pipeline_matmul_id_bf16, matmul_id_bf16, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4, _id, 0);
    }
#undef CREATE_MM
    // mul mat vec
    // the number of rows computed per shader depends on GPU model and quant
    uint32_t rm_stdq = 1;
    uint32_t rm_kq = 2;
    if (device->vendor_id == VK_VENDOR_ID_AMD) {
        if (device->architecture == AMD_GCN) {
            rm_stdq = 2;
            rm_kq = 4;
        }
    } else if (device->vendor_id == VK_VENDOR_ID_INTEL)
        rm_stdq = 2;
    uint32_t rm_iq = 2 * rm_kq;
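
    // Resulting rows-per-workgroup multipliers:        default   AMD GCN   Intel
    //   standard quants (q4_0..q8_0, rm_stdq)             1         2        2
    //   k-quants (q2_k..q6_k, rm_kq)                      2         4        2
    //   iq/mxfp4 quants (rm_iq = 2 * rm_kq)               4         8        4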
    const bool use_subgroups = device->subgroup_arithmetic && device->architecture != vk_device_architecture::AMD_GCN;
    // Ensure a subgroup size >= 16 is available
    const bool use_subgroups16 = use_subgroups && subgroup_min_size_16;
    const uint32_t subgroup_size = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control && device->subgroup_min_size <= 16 && device->subgroup_max_size >= 16) ? 16 : device->subgroup_size;
    const uint32_t subgroup_size16 = std::max(subgroup_size, 16u);
    const uint32_t force_subgroup_size = use_subgroups ? subgroup_size : 0;
    const uint32_t force_subgroup_size16 = use_subgroups16 ? subgroup_size16 : 0;
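
    // A force value of 0 leaves the subgroup size to the driver; a nonzero value is
    // forwarded to ggml_vk_create_pipeline (the last two arguments of the calls below)
    // so the subgroup reductions in the shader run at a known, pinned width.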
    static constexpr uint32_t mul_mat_vec_num_bindings = 5;
    static constexpr uint32_t mul_mat_vec_id_num_bindings = 6;
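
    // The _id variants bind one extra buffer (6 vs. 5), presumably the expert-id
    // tensor that mul_mat_id uses to select the matrix per token.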
    for (uint32_t w = 0; w < DMMV_WG_SIZE_COUNT; ++w) {
        const uint32_t wg_size_subgroup   = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size   : (subgroup_size * 4);
        const uint32_t wg_size_subgroup16 = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size16 : (subgroup_size16 * 4);
        const shader_reduction_mode reduc = (use_subgroups && w == DMMV_WG_SIZE_SUBGROUP) ? SHADER_REDUCTION_MODE_SUBGROUP :
                                            (use_subgroups && w == DMMV_WG_SIZE_LARGE) ? SHADER_REDUCTION_MODE_HYBRID :
                                            SHADER_REDUCTION_MODE_SHMEM;
        const shader_reduction_mode reduc16 = (use_subgroups16 && w == DMMV_WG_SIZE_SUBGROUP) ? SHADER_REDUCTION_MODE_SUBGROUP :
                                              (use_subgroups16 && w == DMMV_WG_SIZE_LARGE) ? SHADER_REDUCTION_MODE_HYBRID :
                                              SHADER_REDUCTION_MODE_SHMEM;
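
        // Reduction-mode selection, spelled out: a subgroup-sized workgroup can reduce
        // entirely with subgroup ops; the 4x-subgroup workgroups combine subgroup ops
        // with a shared-memory step (hybrid); without usable subgroup arithmetic
        // everything falls back to shared memory.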
        for (uint32_t i = 0; i < mul_mat_vec_max_cols; ++i) {
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32", arr_dmmv_f32_f32_f32_len[reduc], arr_dmmv_f32_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32", arr_dmmv_f16_f32_f32_len[reduc], arr_dmmv_f16_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f32_f32", arr_dmmv_bf16_f32_f32_len[reduc], arr_dmmv_bf16_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32", arr_dmmv_q4_0_f32_f32_len[reduc], arr_dmmv_q4_0_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32", arr_dmmv_q4_1_f32_f32_len[reduc], arr_dmmv_q4_1_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32", arr_dmmv_q5_0_f32_f32_len[reduc], arr_dmmv_q5_0_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32", arr_dmmv_q5_1_f32_f32_len[reduc], arr_dmmv_q5_1_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32", arr_dmmv_q8_0_f32_f32_len[reduc], arr_dmmv_q8_0_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32", arr_dmmv_q2_k_f32_f32_len[reduc16], arr_dmmv_q2_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32", arr_dmmv_q3_k_f32_f32_len[reduc16], arr_dmmv_q3_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32", arr_dmmv_q4_k_f32_f32_len[reduc16], arr_dmmv_q4_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32", arr_dmmv_q5_k_f32_f32_len[reduc16], arr_dmmv_q5_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32", arr_dmmv_q6_k_f32_f32_len[reduc16], arr_dmmv_q6_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f32_f32", arr_dmmv_iq1_s_f32_f32_len[reduc16], arr_dmmv_iq1_s_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f32_f32", arr_dmmv_iq1_m_f32_f32_len[reduc16], arr_dmmv_iq1_m_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f32_f32", arr_dmmv_iq2_xxs_f32_f32_len[reduc16], arr_dmmv_iq2_xxs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f32_f32", arr_dmmv_iq2_xs_f32_f32_len[reduc16], arr_dmmv_iq2_xs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f32_f32", arr_dmmv_iq2_s_f32_f32_len[reduc16], arr_dmmv_iq2_s_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f32_f32", arr_dmmv_iq3_xxs_f32_f32_len[reduc16], arr_dmmv_iq3_xxs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f32_f32", arr_dmmv_iq3_s_f32_f32_len[reduc16], arr_dmmv_iq3_s_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f32_f32", arr_dmmv_iq4_xs_f32_f32_len[reduc16], arr_dmmv_iq4_xs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32", arr_dmmv_iq4_nl_f32_f32_len[reduc16], arr_dmmv_iq4_nl_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f32_f32", arr_dmmv_mxfp4_f32_f32_len[reduc16], arr_dmmv_mxfp4_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
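
            // Same matrix-vector set again for f16 src1 data (the *_f16_f32 shader
            // variants); the destination stays f32.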
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32", arr_dmmv_f32_f16_f32_len[reduc], arr_dmmv_f32_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32", arr_dmmv_f16_f16_f32_len[reduc], arr_dmmv_f16_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f16_f32", arr_dmmv_bf16_f16_f32_len[reduc], arr_dmmv_bf16_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32", arr_dmmv_q4_0_f16_f32_len[reduc], arr_dmmv_q4_0_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32", arr_dmmv_q4_1_f16_f32_len[reduc], arr_dmmv_q4_1_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32", arr_dmmv_q5_0_f16_f32_len[reduc], arr_dmmv_q5_0_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32", arr_dmmv_q5_1_f16_f32_len[reduc], arr_dmmv_q5_1_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32", arr_dmmv_q8_0_f16_f32_len[reduc], arr_dmmv_q8_0_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32", arr_dmmv_q2_k_f16_f32_len[reduc16], arr_dmmv_q2_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32", arr_dmmv_q3_k_f16_f32_len[reduc16], arr_dmmv_q3_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32", arr_dmmv_q4_k_f16_f32_len[reduc16], arr_dmmv_q4_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32", arr_dmmv_q5_k_f16_f32_len[reduc16], arr_dmmv_q5_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32", arr_dmmv_q6_k_f16_f32_len[reduc16], arr_dmmv_q6_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f16_f32", arr_dmmv_iq1_s_f16_f32_len[reduc16], arr_dmmv_iq1_s_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f16_f32", arr_dmmv_iq1_m_f16_f32_len[reduc16], arr_dmmv_iq1_m_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f16_f32", arr_dmmv_iq2_xxs_f16_f32_len[reduc16], arr_dmmv_iq2_xxs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f16_f32", arr_dmmv_iq2_xs_f16_f32_len[reduc16], arr_dmmv_iq2_xs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f16_f32", arr_dmmv_iq2_s_f16_f32_len[reduc16], arr_dmmv_iq2_s_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f16_f32", arr_dmmv_iq3_xxs_f16_f32_len[reduc16], arr_dmmv_iq3_xxs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f16_f32", arr_dmmv_iq3_s_f16_f32_len[reduc16], arr_dmmv_iq3_s_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f16_f32", arr_dmmv_iq4_xs_f16_f32_len[reduc16], arr_dmmv_iq4_xs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32", arr_dmmv_iq4_nl_f16_f32_len[reduc16], arr_dmmv_iq4_nl_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
            ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f16_f32", arr_dmmv_mxfp4_f16_f32_len[reduc16], arr_dmmv_mxfp4_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            if (device->integer_dot_product) {
                const uint32_t subgroup_size_int = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control) ? device->subgroup_min_size : device->subgroup_size;
                const uint32_t wg_size_subgroup_int = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size_int : (subgroup_size_int * 4);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_q8_1_f32", arr_dmmv_q4_0_q8_1_f32_len[reduc], arr_dmmv_q4_0_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_q8_1_f32", arr_dmmv_q4_1_q8_1_f32_len[reduc], arr_dmmv_q4_1_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_q8_1_f32", arr_dmmv_q5_0_q8_1_f32_len[reduc], arr_dmmv_q5_0_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_q8_1_f32", arr_dmmv_q5_1_q8_1_f32_len[reduc], arr_dmmv_q5_1_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
                ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_q8_1_f32", arr_dmmv_q8_0_q8_1_f32_len[reduc], arr_dmmv_q8_0_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup_int, 1*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
            }
#endif // GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT
        }
    }
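
    // mul mat vec with expert selection (mul_mat_id); unlike the loop above, these use a
    // single fixed workgroup-size variant per type rather than the reduction-mode variants.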
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_BF16], "mul_mat_vec_id_bf16_f32", mul_mat_vec_id_bf16_f32_len, mul_mat_vec_id_bf16_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_S], "mul_mat_vec_id_iq1_s_f32", mul_mat_vec_id_iq1_s_f32_len, mul_mat_vec_id_iq1_s_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_M], "mul_mat_vec_id_iq1_m_f32", mul_mat_vec_id_iq1_m_f32_len, mul_mat_vec_id_iq1_m_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XXS], "mul_mat_vec_id_iq2_xxs_f32", mul_mat_vec_id_iq2_xxs_f32_len, mul_mat_vec_id_iq2_xxs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XS], "mul_mat_vec_id_iq2_xs_f32", mul_mat_vec_id_iq2_xs_f32_len, mul_mat_vec_id_iq2_xs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_S], "mul_mat_vec_id_iq2_s_f32", mul_mat_vec_id_iq2_s_f32_len, mul_mat_vec_id_iq2_s_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_XXS], "mul_mat_vec_id_iq3_xxs_f32", mul_mat_vec_id_iq3_xxs_f32_len, mul_mat_vec_id_iq3_xxs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_S], "mul_mat_vec_id_iq3_s_f32", mul_mat_vec_id_iq3_s_f32_len, mul_mat_vec_id_iq3_s_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_XS], "mul_mat_vec_id_iq4_xs_f32", mul_mat_vec_id_iq4_xs_f32_len, mul_mat_vec_id_iq4_xs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_MXFP4], "mul_mat_vec_id_mxfp4_f32", mul_mat_vec_id_mxfp4_f32_len, mul_mat_vec_id_mxfp4_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
    // dequant shaders
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_k", dequant_q2_k_len, dequant_q2_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_k", dequant_q3_k_len, dequant_q3_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ1_S], "dequant_iq1_s", dequant_iq1_s_len, dequant_iq1_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ1_M], "dequant_iq1_m", dequant_iq1_m_len, dequant_iq1_m_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_XXS], "dequant_iq2_xxs", dequant_iq2_xxs_len, dequant_iq2_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_XS], "dequant_iq2_xs", dequant_iq2_xs_len, dequant_iq2_xs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_S], "dequant_iq2_s", dequant_iq2_s_len, dequant_iq2_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_XXS], "dequant_iq3_xxs", dequant_iq3_xxs_len, dequant_iq3_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_S], "dequant_iq3_s", dequant_iq3_s_len, dequant_iq3_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_XS], "dequant_iq4_xs", dequant_iq4_xs_len, dequant_iq4_xs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_MXFP4], "dequant_mxfp4", dequant_mxfp4_len, dequant_mxfp4_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
  3164. // get_rows
  3165. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  3166. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  3167. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_BF16], "get_rows_bf16", get_rows_bf16_len, get_rows_bf16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  3168. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3169. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3170. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3171. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3172. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3173. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q2_K], "get_rows_q2_k", get_rows_q2_k_len, get_rows_q2_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3174. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q3_K], "get_rows_q3_k", get_rows_q3_k_len, get_rows_q3_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3175. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_K], "get_rows_q4_k", get_rows_q4_k_len, get_rows_q4_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3176. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_K], "get_rows_q5_k", get_rows_q5_k_len, get_rows_q5_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3177. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q6_K], "get_rows_q6_k", get_rows_q6_k_len, get_rows_q6_k_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3178. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ1_S], "get_rows_iq1_s", get_rows_iq1_s_len, get_rows_iq1_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3179. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ1_M], "get_rows_iq1_m", get_rows_iq1_m_len, get_rows_iq1_m_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3180. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_XXS], "get_rows_iq2_xxs", get_rows_iq2_xxs_len, get_rows_iq2_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3181. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_XS], "get_rows_iq2_xs", get_rows_iq2_xs_len, get_rows_iq2_xs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3182. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_S], "get_rows_iq2_s", get_rows_iq2_s_len, get_rows_iq2_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3183. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs", get_rows_iq3_xxs_len, get_rows_iq3_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3184. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_S], "get_rows_iq3_s", get_rows_iq3_s_len, get_rows_iq3_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3185. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_XS], "get_rows_iq4_xs", get_rows_iq4_xs_len, get_rows_iq4_xs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3186. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3187. ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_MXFP4], "get_rows_mxfp4", get_rows_mxfp4_len, get_rows_mxfp4_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3188. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  3189. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  3190. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_BF16], "get_rows_bf16_f32", get_rows_bf16_f32_len, get_rows_bf16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
  3191. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3192. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3193. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3194. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3195. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3196. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q2_K], "get_rows_q2_k_f32", get_rows_q2_k_f32_len, get_rows_q2_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3197. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q3_K], "get_rows_q3_k_f32", get_rows_q3_k_f32_len, get_rows_q3_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3198. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_K], "get_rows_q4_k_f32", get_rows_q4_k_f32_len, get_rows_q4_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3199. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_K], "get_rows_q5_k_f32", get_rows_q5_k_f32_len, get_rows_q5_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3200. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q6_K], "get_rows_q6_k_f32", get_rows_q6_k_f32_len, get_rows_q6_k_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3201. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ1_S], "get_rows_iq1_s_f32", get_rows_iq1_s_f32_len, get_rows_iq1_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3202. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ1_M], "get_rows_iq1_m_f32", get_rows_iq1_m_f32_len, get_rows_iq1_m_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3203. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_XXS], "get_rows_iq2_xxs_f32", get_rows_iq2_xxs_f32_len, get_rows_iq2_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3204. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_XS], "get_rows_iq2_xs_f32", get_rows_iq2_xs_f32_len, get_rows_iq2_xs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3205. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_S], "get_rows_iq2_s_f32", get_rows_iq2_s_f32_len, get_rows_iq2_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3206. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs_f32", get_rows_iq3_xxs_f32_len, get_rows_iq3_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3207. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_S], "get_rows_iq3_s_f32", get_rows_iq3_s_f32_len, get_rows_iq3_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3208. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_XS], "get_rows_iq4_xs_f32", get_rows_iq4_xs_f32_len, get_rows_iq4_xs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3209. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3210. ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_MXFP4], "get_rows_mxfp4_f32", get_rows_mxfp4_f32_len, get_rows_mxfp4_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
  3211. ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1);
  3212. ggml_vk_create_pipeline(device, device->pipeline_flash_attn_split_k_reduce, "fa_split_k_reduce", fa_split_k_reduce_len, fa_split_k_reduce_data, "main", 3, 5 * sizeof(uint32_t), {1, device->subgroup_size, 1}, {device->subgroup_size}, 1, true);
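
    // quantize activations to q8_1 (x4 blocks); the subgroup-clustered shader is used when full subgroups are guaranteed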
    if (device->subgroup_clustered && device->subgroup_require_full_support) {
        ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1_x4, "quantize_q8_1_x4", quantize_q8_1_x4_subgroup_len, quantize_q8_1_x4_subgroup_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1, true, true);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1_x4, "quantize_q8_1_x4", quantize_q8_1_x4_len, quantize_q8_1_x4_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1);
    }
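
    // mul_mat_vec p021: one pipeline per GQA ratio, preferring the subgroup-add variant when supported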
    for (uint32_t i = 0; i < p021_max_gqa_ratio; ++i) {
        if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
            ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_subgroup_add_len, mul_mat_vec_p021_f16_f32_subgroup_add_data, "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_p021_push_constants), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true, true);
        } else {
            ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_p021_push_constants), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true);
        }
    }
    ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_nc_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 0}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_f32, "rms_norm_mul_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 1}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_partials_f32, "rms_norm_partials_f32", rms_norm_partials_f32_len, rms_norm_partials_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 0}, 1, true);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_partials_f32, "rms_norm_mul_partials_f32", rms_norm_partials_f32_len, rms_norm_partials_f32_data, "main", 4, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 1}, 1, true);

    if (device->float_controls_rte_fp16 &&
        sizeof(vk_op_rms_norm_mul_rope_push_constants) <= device->properties.limits.maxPushConstantsSize) {
        ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_rope_f32_f32, "rms_norm_mul_rope_f32_f32", rms_norm_mul_rope_f32_f32_len, rms_norm_mul_rope_f32_f32_data, "main", 7, sizeof(vk_op_rms_norm_mul_rope_push_constants), {1, 1, 1}, {0, 1}, 1, true);
        ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_rope_f32_f16, "rms_norm_mul_rope_f32_f16", rms_norm_mul_rope_f32_f16_rte_len, rms_norm_mul_rope_f32_f16_rte_data, "main", 7, sizeof(vk_op_rms_norm_mul_rope_push_constants), {1, 1, 1}, {0, 1}, 1, true);
    }
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_back_f32, "rms_norm_back_f32", rms_norm_back_f32_len, rms_norm_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_l2_norm_f32, "l2_norm_f32", l2_norm_f32_len, l2_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f32, "cpy_f16_f32", cpy_f16_f32_len, cpy_f16_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_bf16, "cpy_f32_bf16", cpy_f32_bf16_len, cpy_f32_bf16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_i32_f32, "cpy_i32_f32", cpy_i32_f32_len, cpy_i32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_i32, "cpy_f32_i32", cpy_f32_i32_len, cpy_f32_i32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f32, "contig_cpy_f32_f32", contig_cpy_f32_f32_len, contig_cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f16, "contig_cpy_f32_f16", contig_cpy_f32_f16_len, contig_cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f16, "contig_cpy_f16_f16", contig_cpy_f16_f16_len, contig_cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f32, "contig_cpy_f16_f32", contig_cpy_f16_f32_len, contig_cpy_f16_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_bf16, "contig_cpy_f32_bf16", contig_cpy_f32_bf16_len, contig_cpy_f32_bf16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_i32_f32, "contig_cpy_i32_f32", contig_cpy_i32_f32_len, contig_cpy_i32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_i32, "contig_cpy_f32_i32", contig_cpy_f32_i32_len, contig_cpy_f32_i32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_transpose_32, "cpy_transpose_32", cpy_transpose_32_len, cpy_transpose_32_data, "main", 2, sizeof(vk_op_unary_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_transpose_16, "cpy_transpose_16", cpy_transpose_16_len, cpy_transpose_16_data, "main", 2, sizeof(vk_op_unary_push_constants), {1, 1, 1}, {}, 1);

    if (device->float_controls_rte_fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_0], "cpy_f32_q4_0", cpy_f32_q4_0_rte_len, cpy_f32_q4_0_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_1], "cpy_f32_q4_1", cpy_f32_q4_1_rte_len, cpy_f32_q4_1_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_0], "cpy_f32_q5_0", cpy_f32_q5_0_rte_len, cpy_f32_q5_0_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_1], "cpy_f32_q5_1", cpy_f32_q5_1_rte_len, cpy_f32_q5_1_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q8_0], "cpy_f32_q8_0", cpy_f32_q8_0_rte_len, cpy_f32_q8_0_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_IQ4_NL], "cpy_f32_iq4_nl", cpy_f32_iq4_nl_rte_len, cpy_f32_iq4_nl_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_0], "cpy_f32_q4_0", cpy_f32_q4_0_len, cpy_f32_q4_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_1], "cpy_f32_q4_1", cpy_f32_q4_1_len, cpy_f32_q4_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_0], "cpy_f32_q5_0", cpy_f32_q5_0_len, cpy_f32_q5_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_1], "cpy_f32_q5_1", cpy_f32_q5_1_len, cpy_f32_q5_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q8_0], "cpy_f32_q8_0", cpy_f32_q8_0_len, cpy_f32_q8_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_IQ4_NL], "cpy_f32_iq4_nl", cpy_f32_iq4_nl_len, cpy_f32_iq4_nl_data, "main", 2, sizeof(vk_op_unary_push_constants), {32, 1, 1}, {}, 1);
    }
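
    // set_rows: one pipeline per destination type, instantiated for i32 and i64 row indices, with RTE variants when fp16 round-to-nearest-even is controllable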
#define SET_ROWS(itype, rte) \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_F32], "set_rows_f32" #itype, set_rows_f32 ## itype ## rte ## _len, set_rows_f32 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_F16], "set_rows_f16" #itype, set_rows_f16 ## itype ## rte ## _len, set_rows_f16 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_BF16], "set_rows_bf16" #itype, set_rows_bf16 ## itype ## rte ## _len, set_rows_bf16 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q4_0], "set_rows_q4_0" #itype, set_rows_q4_0 ## itype ## rte ## _len, set_rows_q4_0 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q4_1], "set_rows_q4_1" #itype, set_rows_q4_1 ## itype ## rte ## _len, set_rows_q4_1 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q5_0], "set_rows_q5_0" #itype, set_rows_q5_0 ## itype ## rte ## _len, set_rows_q5_0 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q5_1], "set_rows_q5_1" #itype, set_rows_q5_1 ## itype ## rte ## _len, set_rows_q5_1 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_Q8_0], "set_rows_q8_0" #itype, set_rows_q8_0 ## itype ## rte ## _len, set_rows_q8_0 ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_set_rows ## itype [GGML_TYPE_IQ4_NL], "set_rows_iq4_nl" #itype, set_rows_iq4_nl ## itype ## rte ## _len, set_rows_iq4_nl ## itype ## rte ## _data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {1}, 1, true);

    if (device->float_controls_rte_fp16) {
        SET_ROWS(_i32, _rte)
        SET_ROWS(_i64, _rte)
    } else {
        SET_ROWS(_i32, )
        SET_ROWS(_i64, )
    }
#undef SET_ROWS

    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_0], "cpy_q4_0_f32", cpy_q4_0_f32_len, cpy_q4_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_0), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_1], "cpy_q4_1_f32", cpy_q4_1_f32_len, cpy_q4_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_1), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_0], "cpy_q5_0_f32", cpy_q5_0_f32_len, cpy_q5_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_0), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_1], "cpy_q5_1_f32", cpy_q5_1_f32_len, cpy_q5_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_1), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q8_0], "cpy_q8_0_f32", cpy_q8_0_f32_len, cpy_q8_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q8_0), 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_IQ4_NL], "cpy_iq4_nl_f32", cpy_iq4_nl_f32_len, cpy_iq4_nl_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_IQ4_NL), 1, 1}, {}, 1);
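
    // binary ops: instantiate every f16/f32 combination of src0, src1 and dst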
    auto get_suffix = [](bool src0_f16, bool src1_f16, bool dst_f16) {
        std::string s;
        s += std::string(src0_f16 ? "_f16" : "_f32");
        s += std::string(src1_f16 ? "_f16" : "_f32");
        s += std::string(dst_f16 ? "_f16" : "_f32");
        return s;
    };

    bool rte = device->float_controls_rte_fp16;
#define CREATE_BINARY(name, namemod, spec, bindings) \
    for (int s0 : {0,1}) for (int s1 : {0,1}) for (int d : {0,1}) \
        ggml_vk_create_pipeline2(device, device->pipeline_ ## name ## namemod[s0][s1][d], \
            #name + get_suffix(s0, s1, d) + #namemod, name ## _len[s0][s1][d][rte], name ## _data[s0][s1][d][rte], \
            "main", (bindings), sizeof(vk_op_binary_push_constants), {512, 1, 1}, spec, 1);

    CREATE_BINARY(add, , {0}, 4)
    CREATE_BINARY(add, _norepeat, {1}, 4)
    CREATE_BINARY(sub, , {0}, 3)
    CREATE_BINARY(sub, _norepeat, {1}, 3)
    CREATE_BINARY(mul, , {0}, 3)
    CREATE_BINARY(mul, _norepeat, {1}, 3)
    CREATE_BINARY(div, , {0}, 3)
    CREATE_BINARY(div, _norepeat, {1}, 3)
    CREATE_BINARY(add_rms, , {0}, 4)
    CREATE_BINARY(add_rms, _norepeat, {1}, 4)
#undef CREATE_BINARY

    if (device->multi_add) {
        for (uint32_t i = 0; i < MAX_FUSED_ADDS; ++i) {
            ggml_vk_create_pipeline2(device, device->pipeline_multi_add[i], "multi_add_f32_" + std::to_string(i+1), multi_add_f32_len, multi_add_f32_data, "main", MAX_PARAMETER_COUNT, sizeof(vk_op_multi_add_push_constants), {512, 1, 1}, {i+2}, 1);
            ggml_vk_create_pipeline2(device, device->pipeline_multi_add_rms[i], "multi_add_rms_f32_" + std::to_string(i+1), multi_add_rms_f32_len, multi_add_rms_f32_data, "main", MAX_PARAMETER_COUNT, sizeof(vk_op_multi_add_push_constants), {512, 1, 1}, {i+2}, 1);
        }
    }

    ggml_vk_create_pipeline(device, device->pipeline_add_id_f32, "add_id_f32", add_id_f32_len, add_id_f32_data, "main", 4, sizeof(vk_op_add_id_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_acc_f32, "acc_f32", acc_f32_len, acc_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_concat_f32, "concat_f32", concat_f32_len, concat_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_f16, "concat_f16", concat_f16_len, concat_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_i32, "concat_i32", concat_i32_len, concat_i32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_upscale_nearest_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_NEAREST}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_upscale_bilinear_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_BILINEAR}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_upscale_bicubic_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_BICUBIC}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    if (device->float_controls_rte_fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_log[0], "log_f32_rte", log_f32_rte_len, log_f32_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_log[1], "log_f16_rte", log_f16_rte_len, log_f16_rte_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_log[0], "log_f32", log_f32_len, log_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_log[1], "log_f16", log_f16_len, log_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    }

    ggml_vk_create_pipeline(device, device->pipeline_tri[0], "tri_f32", tri_f32_len, tri_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_tri[1], "tri_f16", tri_f16_len, tri_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_pad_f32, "pad_f32", pad_f32_len, pad_f32_data, "main", 2, sizeof(vk_op_pad_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_roll_f32, "roll_f32", roll_f32_len, roll_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_repeat_f32, "repeat_f32", repeat_f32_len, repeat_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_repeat_back_f32, "repeat_back_f32", repeat_back_f32_len, repeat_back_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
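
    // element-wise unary ops, each with an f32 and an f16 variant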
#define CREATE_UNARY(name) \
    ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
    ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    CREATE_UNARY(gelu)
    CREATE_UNARY(gelu_erf)
    CREATE_UNARY(gelu_quick)
    CREATE_UNARY(silu)
    CREATE_UNARY(relu)
    CREATE_UNARY(neg)
    CREATE_UNARY(tanh)
    CREATE_UNARY(sigmoid)
    CREATE_UNARY(hardsigmoid)
    CREATE_UNARY(hardswish)
    CREATE_UNARY(abs)
    CREATE_UNARY(softplus)
    CREATE_UNARY(step)
    CREATE_UNARY(round)
    CREATE_UNARY(ceil)
    CREATE_UNARY(floor)
    CREATE_UNARY(trunc)
#undef CREATE_UNARY

#define CREATE_UNARY_RTE(name) \
    if (device->float_controls_rte_fp16) { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32_rte", name ## _f32_rte_len, name ## _f32_rte_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16_rte", name ## _f16_rte_len, name ## _f16_rte_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
    } else { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); \
    }
    CREATE_UNARY_RTE(exp)
#undef CREATE_UNARY_RTE

    ggml_vk_create_pipeline(device, device->pipeline_add1_f16_f16, "add1_f16_f16", add1_f16_f16_len, add1_f16_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_add1_f16_f32, "add1_f16_f32", add1_f16_f32_len, add1_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_add1_f32_f32, "add1_f32_f32", add1_f32_f32_len, add1_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_arange_f32, "arange_f32", arange_f32_len, arange_f32_data, "main", 1, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_fill_f32, "fill_f32", fill_f32_len, fill_f32_data, "main", 1, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
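
    // gated linear unit variants; RTE shaders are used when fp16 round-to-nearest-even is controllable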
#define CREATE_GLU(name) \
    if (device->float_controls_rte_fp16) { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32_rte", name ## _f32_rte_len, name ## _f32_rte_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16_rte", name ## _f16_rte_len, name ## _f16_rte_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
    } else { \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \
    }

    CREATE_GLU(geglu)
    CREATE_GLU(reglu)
    CREATE_GLU(swiglu)
    CREATE_GLU(swiglu_oai)
    CREATE_GLU(geglu_erf)
    CREATE_GLU(geglu_quick)
#undef CREATE_GLU

    ggml_vk_create_pipeline(device, device->pipeline_leaky_relu_f32, "leaky_relu_f32", leaky_relu_f32_len, leaky_relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_silu_back_f32, "silu_back_f32", silu_back_f32_len, silu_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {1, 512, 1}, {}, 1, true);

    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_wg512, "soft_max_f32_wg512", soft_max_f32_len, soft_max_f32_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16_wg512, "soft_max_f32_f16_wg512", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 4, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_back_f32, "soft_max_back_f32", soft_max_back_f32_len, soft_max_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1, true);
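
    // rope: the f16-output variants switch to RTE shaders when float controls are available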
    ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_multi_f32, "rope_multi_f32", rope_multi_f32_len, rope_multi_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f32, "rope_vision_f32", rope_vision_f32_len, rope_vision_f32_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);

    if (device->float_controls_rte_fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_rte_len, rope_norm_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_rte_len, rope_neox_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_multi_f16, "rope_multi_f16", rope_multi_f16_rte_len, rope_multi_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_rte_len, rope_vision_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32_f16, "rope_norm_f32_f16", rope_norm_f32_f16_rte_len, rope_norm_f32_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32_f16, "rope_neox_f32_f16", rope_neox_f32_f16_rte_len, rope_neox_f32_f16_rte_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_multi_f16, "rope_multi_f16", rope_multi_f16_len, rope_multi_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_len, rope_vision_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32_f16, "rope_norm_f32_f16", rope_norm_f32_f16_len, rope_norm_f32_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
        ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32_f16, "rope_neox_f32_f16", rope_neox_f32_f16_len, rope_neox_f32_f16_data, "main", 5, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    }
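
    // argsort: a shared-memory pipeline per power-of-two column count, plus a "large" fallback pipeline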
    for (uint32_t i = 0; i < num_argsort_pipelines; ++i) {
        uint32_t BLOCK_SIZE = 1u << std::min(i, device->max_workgroup_size_log2);
        if (i <= device->max_workgroup_size_log2 &&
            2 * sizeof(int) * BLOCK_SIZE <= device->properties.limits.maxComputeSharedMemorySize) {
            const uint32_t NCOLS_PADDED_LOG2 = i;
            ggml_vk_create_pipeline2(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 3, sizeof(vk_op_argsort_push_constants), {BLOCK_SIZE, 1, 1}, {BLOCK_SIZE, NCOLS_PADDED_LOG2}, 1, true);
        }
        const uint32_t WG_UNROLL_FACTOR = BLOCK_SIZE > 1 ? 2 : 1;
        BLOCK_SIZE /= WG_UNROLL_FACTOR;
        ggml_vk_create_pipeline2(device, device->pipeline_argsort_large_f32[i], "argsort_large_f32_"+std::to_string(i), argsort_large_f32_len, argsort_large_f32_data, "main", 3, sizeof(vk_op_argsort_push_constants), {BLOCK_SIZE * WG_UNROLL_FACTOR, 1, 1}, {BLOCK_SIZE, WG_UNROLL_FACTOR}, 1, true);
    }
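
    // top-k: prefer the n-ary search shader when the required subgroup ops and shared memory are available, else fall back to argsort-based selection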
    for (uint32_t i = 0; i < num_topk_pipelines; ++i) {
        const uint32_t BLOCK_SIZE = 1u << i;
        const uint32_t NCOLS_PADDED_LOG2 = i;
        if (i <= device->max_workgroup_size_log2) {
            uint32_t nary_shmem = 2 * sizeof(int) * BLOCK_SIZE +
                                  sizeof(int) * device->subgroup_size +
                                  2 * sizeof(int) +
                                  (BLOCK_SIZE / device->subgroup_size) * sizeof(int);
            if (device->subgroup_arithmetic && device->subgroup_require_full_support && device->subgroup_shuffle && device->subgroup_ballot &&
                nary_shmem <= device->properties.limits.maxComputeSharedMemorySize) {
                ggml_vk_create_pipeline2(device, device->pipeline_topk_f32[i], "topk_f32_"+std::to_string(i), topk_nary_search_f32_len, topk_nary_search_f32_data, "main", 2, sizeof(vk_op_topk_push_constants), {BLOCK_SIZE, 1, 1}, {BLOCK_SIZE, device->subgroup_size, device->subgroup_size_log2}, 1, true, true, device->subgroup_size);
            } else if (2 * sizeof(int) * BLOCK_SIZE <= device->properties.limits.maxComputeSharedMemorySize) {
                ggml_vk_create_pipeline2(device, device->pipeline_topk_f32[i], "topk_f32_"+std::to_string(i), topk_argsort_f32_len, topk_argsort_f32_data, "main", 2, sizeof(vk_op_topk_push_constants), {BLOCK_SIZE, 1, 1}, {BLOCK_SIZE, NCOLS_PADDED_LOG2}, 1, true);
            }
        }
    }

    ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);

    ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_sum_rows_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);

    ggml_vk_create_pipeline(device, device->pipeline_cumsum_f32, "cumsum_f32", cumsum_f32_len, cumsum_f32_data, "main", 2, sizeof(vk_op_sum_rows_push_constants), {1, 1, 1}, { 128, device->subgroup_size }, 1, true, true, device->subgroup_size);

    ggml_vk_create_pipeline(device, device->pipeline_count_equal_i32, "count_equal_i32", count_equal_i32_len, count_equal_i32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, { device->subgroup_size }, 1);

    for (auto &s : device->pipeline_solve_tri_f32) {
        const vk_solve_tri_pipeline_state &state = s.first;
        ggml_vk_create_pipeline(
            device, s.second, "solve_tri_f32",
            solve_tri_f32_len, solve_tri_f32_data, "main", 3,
            sizeof(vk_op_binary_push_constants), {1, 1, 1}, { 0, state.N, state.K }, 1, true);
    }
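
    // im2col: the _bda variants use buffer device addresses and require shader int64 support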
#define IM2COL(bda) \
    ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32 ## bda ## _len, im2col_f32 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); \
    ggml_vk_create_pipeline(device, device->pipeline_im2col_3d_f32, "im2col_3d_f32", im2col_3d_f32 ## bda ## _len, im2col_3d_f32 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_3d_push_constants), {512, 1, 1}, { 512 }, 1, true); \
    if (device->float_controls_rte_fp16) { \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte ## bda ## _len, im2col_f32_f16_rte ## bda ## _data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_3d_f32_f16, "im2col_3d_f32_f16", im2col_3d_f32_f16_rte ## bda ## _len, im2col_3d_f32_f16_rte ## bda ## _data, "main", 2, sizeof(vk_op_im2col_3d_push_constants), {512, 1, 1}, { 512 }, 1, true); \
    } else { \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16 ## bda ## _len, im2col_f32_f16 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); \
        ggml_vk_create_pipeline(device, device->pipeline_im2col_3d_f32_f16, "im2col_3d_f32_f16", im2col_3d_f32_f16 ## bda ## _len, im2col_3d_f32_f16 ## bda ## _data, "main", 2, sizeof(vk_op_im2col_3d_push_constants), {512, 1, 1}, { 512 }, 1, true); \
    }

    if (device->shader_int64 && device->buffer_device_address) {
        IM2COL(_bda)
    } else {
        IM2COL()
    }

    ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_conv_transpose_1d_f32, "conv_transpose_1d_f32", conv_transpose_1d_f32_len, conv_transpose_1d_f32_data, "main", 3, sizeof(vk_op_conv_transpose_1d_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_pool2d_f32, "pool2d_f32", pool2d_f32_len, pool2d_f32_data, "main", 2, sizeof(vk_op_pool2d_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv6_f32, "rwkv_wkv6_f32", rwkv_wkv6_f32_len, rwkv_wkv6_f32_data, "main", 7, sizeof(vk_op_rwkv_wkv6_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv7_f32, "rwkv_wkv7_f32", rwkv_wkv7_f32_len, rwkv_wkv7_f32_data, "main", 8, sizeof(vk_op_rwkv_wkv7_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
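
    // ssm_scan: subgroup-arithmetic variant when full subgroups are guaranteed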
    if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1, true, true);
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true);
    } else {
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1, true, true);
        ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true);
    }

    ggml_vk_create_pipeline(device, device->pipeline_ssm_conv_f32, "ssm_conv_f32", ssm_conv_f32_len, ssm_conv_f32_data, "main", 3, sizeof(vk_op_ssm_conv_push_constants), {32, 1, 1}, {32}, 1);

    ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_opt_step_sgd_f32, "opt_step_sgd_f32", opt_step_sgd_f32_len, opt_step_sgd_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    // conv2d, conv_transpose_2d
    for (uint32_t s = 0; s < CONV_SHAPE_COUNT; ++s) {
        uint32_t conv2d_WG_SIZE = 256;
        uint32_t conv2d_BS_K = 128;
        uint32_t conv2d_BS_CRS = 16;
        uint32_t use_collectives = 0; // Enables subgroup ops to avoid recomputing indices.
        uint32_t conv2d_BS_NPQ = 128;
        uint32_t conv2d_TS_K = 8;
        uint32_t conv2d_SHMEM_PAD = 4;
        bool conv2d_UNROLL = true;

#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
        if (device->coopmat2) {
            conv2d_SHMEM_PAD = 8; // 8 float16_t
        }
#endif
        if (device->vendor_id == VK_VENDOR_ID_INTEL) {
            conv2d_SHMEM_PAD = 0;
            conv2d_UNROLL = false;
        } else if (device->vendor_id == VK_VENDOR_ID_AMD) {
            conv2d_SHMEM_PAD = device->architecture == vk_device_architecture::AMD_GCN ? 1 : 4;
        }

        switch (s) {
        default:
        case CONV_SHAPE_128x128:
            conv2d_BS_K = conv_shapes_wg_denoms[CONV_SHAPE_128x128][0];
            conv2d_BS_NPQ = conv_shapes_wg_denoms[CONV_SHAPE_128x128][1];
            conv2d_BS_CRS = 16;
            if (device->vendor_id == VK_VENDOR_ID_AMD && device->architecture != vk_device_architecture::AMD_GCN) {
                conv2d_UNROLL = false;
            }
            break;
        case CONV_SHAPE_64x32:
            conv2d_BS_K = conv_shapes_wg_denoms[CONV_SHAPE_64x32][0];
            conv2d_BS_NPQ = conv_shapes_wg_denoms[CONV_SHAPE_64x32][1];
            conv2d_BS_CRS = 32;
            conv2d_TS_K = 4;
            break;
        case CONV_SHAPE_32x256:
            conv2d_BS_K = conv_shapes_wg_denoms[CONV_SHAPE_32x256][0];
            conv2d_BS_NPQ = conv_shapes_wg_denoms[CONV_SHAPE_32x256][1];
            conv2d_BS_CRS = 16;
            break;
        }

        // Use collectives on pre-Turing NVIDIA GPUs and GCN AMD cards, which had slower integer math.
        bool allow_collectives_nv = device->vendor_id != VK_VENDOR_ID_NVIDIA ||
                                    device->architecture == vk_device_architecture::NVIDIA_PRE_TURING;
        bool allow_collectives_amd = device->vendor_id != VK_VENDOR_ID_AMD ||
                                     device->architecture == vk_device_architecture::AMD_GCN;

        if (device->subgroup_shuffle &&
            device->vendor_id != VK_VENDOR_ID_INTEL && // Do not enable collectives on Intel, see PR 14316.
            allow_collectives_nv &&
            allow_collectives_amd) {
            use_collectives = 1;
            conv2d_BS_CRS = std::min(
                device->subgroup_size,
                conv2d_BS_CRS); // CRS block size should be capped at subgroup size for correctness when shuffle is used.
        }
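
        // shared memory needed by the two staging tiles (including padding); shrink the CRS block if it exceeds the device limit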
        uint32_t conv2d_shmem_req =
            (conv2d_BS_K * (conv2d_BS_CRS + conv2d_SHMEM_PAD) + conv2d_BS_CRS * (conv2d_BS_NPQ + conv2d_SHMEM_PAD)) * sizeof(float);
        if (device->properties.limits.maxComputeSharedMemorySize < conv2d_shmem_req) {
            conv2d_BS_CRS = 8;
            if (use_collectives) {
                conv2d_BS_CRS = std::min(device->subgroup_size, conv2d_BS_CRS);
            }
        }

        std::array<uint32_t, 3> wg_denoms = { conv2d_BS_K, conv2d_BS_NPQ, 1 };
        std::vector<uint32_t> spec_constants = { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives, conv2d_SHMEM_PAD };

#define CREATE_CONV(name, type_suffix, spv_suffix) \
        for (auto &c : device->pipeline_##name##type_suffix[s]) { \
            const vk_conv2d_pipeline_state &state = c.first; \
            std::vector<uint32_t> spec_constants_cpy = spec_constants; \
            spec_constants_cpy.push_back(state.s0); \
            spec_constants_cpy.push_back(state.s1); \
            spec_constants_cpy.push_back(state.p0); \
            spec_constants_cpy.push_back(state.p1); \
            spec_constants_cpy.push_back(state.d0); \
            spec_constants_cpy.push_back(state.d1); \
            spec_constants_cpy.push_back(state.KW); \
            spec_constants_cpy.push_back(state.KH); \
            ggml_vk_create_pipeline( \
                device, c.second, #name #type_suffix, \
                name##type_suffix##spv_suffix##_len, name##type_suffix##spv_suffix##_data, "main", 3, \
                sizeof(vk_op_##name##_push_constants), wg_denoms, spec_constants_cpy, 1, true, use_collectives); \
        }
#define CREATE_CONVS(spv_suffix) \
        CREATE_CONV(conv2d, _f32, spv_suffix) \
        CREATE_CONV(conv2d, _f16_f32, spv_suffix) \
        if (device->properties.limits.maxPushConstantsSize >= sizeof(vk_op_conv_transpose_2d_push_constants)) { \
            CREATE_CONV(conv_transpose_2d, _f32, spv_suffix) \
            CREATE_CONV(conv_transpose_2d, _f16_f32, spv_suffix) \
        }
#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
        if (device->coopmat2) {
            CREATE_CONVS(_cm2)
        } else
#endif
        if (conv2d_UNROLL) {
            CREATE_CONVS(_unroll)
        } else {
            CREATE_CONVS( )
        }
#undef CREATE_CONV
#undef CREATE_CONVS
    }

    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f32, "conv2d_dw_whcn_f32", conv2d_dw_whcn_f32_len, conv2d_dw_whcn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f32, "conv2d_dw_cwhn_f32", conv2d_dw_cwhn_f32_len, conv2d_dw_cwhn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f16_f32, "conv2d_dw_whcn_f16_f32", conv2d_dw_whcn_f16_f32_len, conv2d_dw_whcn_f16_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f16_f32, "conv2d_dw_cwhn_f16_f32", conv2d_dw_cwhn_f16_f32_len, conv2d_dw_cwhn_f16_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
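
    // topk_moe: one pipeline per power-of-two size, with early/late softmax and optional normalization variants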
    for (uint32_t i = 0; i < num_topk_moe_pipelines; ++i) {
        ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][TOPK_MOE_EARLY_SOFTMAX], "topk_moe_f32_early_softmax_"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<<i, 0, 0}, 1, true, true);
        ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][TOPK_MOE_EARLY_SOFTMAX_NORM], "topk_moe_f32_early_softmax_norm_"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<<i, 1, 0}, 1, true, true);
        ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][TOPK_MOE_LATE_SOFTMAX], "topk_moe_f32_late_softmax_"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<<i, 0, 1}, 1, true, true);
    }
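
    // wait for all asynchronous pipeline compilations to finish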
    for (auto &c : compiles) {
        c.wait();
    }
}

static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch);

static vk_device ggml_vk_get_device(size_t idx) {
    VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");

    if (vk_instance.devices[idx] == nullptr) {
        VK_LOG_DEBUG("Initializing new vk_device");

        vk_device device = std::make_shared<vk_device_struct>();
        vk_instance.devices[idx] = device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
        device->memory_logger = std::unique_ptr<vk_memory_logger>(new vk_memory_logger());
#endif
        if (vk_perf_logger_enabled) {
            device->perf_logger = std::unique_ptr<vk_perf_logger>(new vk_perf_logger());
        }

        size_t dev_num = vk_instance.device_indices[idx];

        std::vector<vk::PhysicalDevice> physical_devices = vk_instance.instance.enumeratePhysicalDevices();

        if (dev_num >= physical_devices.size()) {
            std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
            throw std::runtime_error("Device not found");
        }

        device->physical_device = physical_devices[dev_num];
        const std::vector<vk::ExtensionProperties> ext_props = device->physical_device.enumerateDeviceExtensionProperties();

        device->architecture = get_device_architecture(device->physical_device);
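        // Runtime behavior toggles: each flag below is enabled simply by the
        // corresponding environment variable being set, regardless of its value.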
        const char* GGML_VK_PREFER_HOST_MEMORY = getenv("GGML_VK_PREFER_HOST_MEMORY");
        device->prefer_host_memory = GGML_VK_PREFER_HOST_MEMORY != nullptr;

        const char* GGML_VK_DISABLE_HOST_VISIBLE_VIDMEM = getenv("GGML_VK_DISABLE_HOST_VISIBLE_VIDMEM");
        device->disable_host_visible_vidmem = GGML_VK_DISABLE_HOST_VISIBLE_VIDMEM != nullptr;

        const char* GGML_VK_ALLOW_SYSMEM_FALLBACK = getenv("GGML_VK_ALLOW_SYSMEM_FALLBACK");
        device->allow_sysmem_fallback = GGML_VK_ALLOW_SYSMEM_FALLBACK != nullptr;

        const char* GGML_VK_DISABLE_GRAPH_OPTIMIZE = getenv("GGML_VK_DISABLE_GRAPH_OPTIMIZE");
        device->disable_graph_optimize = GGML_VK_DISABLE_GRAPH_OPTIMIZE != nullptr;

        bool fp16_storage = false;
        bool fp16_compute = false;
        bool maintenance4_support = false;
        bool sm_builtins = false;
        bool amd_shader_core_properties2 = false;
        bool pipeline_robustness = false;
        bool coopmat2_support = false;
        bool pipeline_executable_properties_support = false;
        device->coopmat_support = false;
        device->integer_dot_product = false;
        bool bfloat16_support = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
                maintenance4_support = true;
            } else if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
                fp16_storage = true;
            } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
                fp16_compute = true;
            } else if (strcmp("VK_NV_shader_sm_builtins", properties.extensionName) == 0) {
                sm_builtins = true;
            } else if (strcmp("VK_AMD_shader_core_properties2", properties.extensionName) == 0) {
                amd_shader_core_properties2 = true;
            } else if (strcmp("VK_EXT_pipeline_robustness", properties.extensionName) == 0) {
                pipeline_robustness = true;
            } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
                device->subgroup_size_control = true;
#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
            } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_COOPMAT")) {
                device->coopmat_support = true;
                device->coopmat_m = 0;
                device->coopmat_n = 0;
                device->coopmat_k = 0;
#endif
#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
            } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_COOPMAT2")) {
                coopmat2_support = true;
#endif
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
            } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_INTEGER_DOT_PRODUCT")) {
                device->integer_dot_product = true;
#endif
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
            } else if (strcmp("VK_KHR_shader_bfloat16", properties.extensionName) == 0 &&
                       !getenv("GGML_VK_DISABLE_BFLOAT16")) {
                bfloat16_support = true;
#endif
            } else if (strcmp("VK_KHR_pipeline_executable_properties", properties.extensionName) == 0) {
                pipeline_executable_properties_support = true;
            }
        }
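        // Extended device properties are gathered through a single
        // getProperties2() call with a pNext chain: the fixed part of the chain
        // is linked first, then each optional struct is appended by advancing a
        // VkBaseOutStructure tail pointer, i.e.
        //     last_struct->pNext = (VkBaseOutStructure *)&optional_props;
        //     last_struct = (VkBaseOutStructure *)&optional_props;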
        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceMaintenance3Properties props3;
        vk::PhysicalDeviceMaintenance4Properties props4;
        vk::PhysicalDeviceSubgroupProperties subgroup_props;
        vk::PhysicalDeviceDriverProperties driver_props;
        vk::PhysicalDeviceShaderSMBuiltinsPropertiesNV sm_props;
        vk::PhysicalDeviceShaderCoreProperties2AMD amd_shader_core_properties2_props;
        vk::PhysicalDeviceVulkan11Properties vk11_props;
        vk::PhysicalDeviceVulkan12Properties vk12_props;
        vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props;
        vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR shader_integer_dot_product_props;

        props2.pNext = &props3;
        props3.pNext = &subgroup_props;
        subgroup_props.pNext = &driver_props;
        driver_props.pNext = &vk11_props;
        vk11_props.pNext = &vk12_props;

        VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_props;

        if (maintenance4_support) {
            last_struct->pNext = (VkBaseOutStructure *)&props4;
            last_struct = (VkBaseOutStructure *)&props4;
        }
        if (sm_builtins) {
            last_struct->pNext = (VkBaseOutStructure *)&sm_props;
            last_struct = (VkBaseOutStructure *)&sm_props;
        }
        if (amd_shader_core_properties2) {
            last_struct->pNext = (VkBaseOutStructure *)&amd_shader_core_properties2_props;
            last_struct = (VkBaseOutStructure *)&amd_shader_core_properties2_props;
        }
        if (device->subgroup_size_control) {
            last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_props;
            last_struct = (VkBaseOutStructure *)&subgroup_size_control_props;
        }

#if defined(VK_NV_cooperative_matrix2)
        vk::PhysicalDeviceCooperativeMatrix2PropertiesNV coopmat2_props;
        if (coopmat2_support) {
            last_struct->pNext = (VkBaseOutStructure *)&coopmat2_props;
            last_struct = (VkBaseOutStructure *)&coopmat2_props;
        }
#endif

        if (device->integer_dot_product) {
            last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_props;
            last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_props;
        }

        device->physical_device.getProperties2(&props2);
        device->properties = props2.properties;
        device->vendor_id = device->properties.vendorID;
        device->driver_id = driver_props.driverID;

        // Implementing the async backend interfaces seems broken on older Intel HW,
        // see https://github.com/ggml-org/llama.cpp/issues/17302.
        device->support_async = (device->vendor_id != VK_VENDOR_ID_INTEL ||
                                 std::string(device->properties.deviceName.data()).find("(DG1)") == std::string::npos) &&
                                getenv("GGML_VK_DISABLE_ASYNC") == nullptr;
        if (!device->support_async) {
            GGML_LOG_DEBUG("ggml_vulkan: WARNING: Async execution disabled on certain Intel devices.\n");
        }
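        // The size overrides below are parsed with std::stoull as raw byte
        // counts, e.g. GGML_VK_FORCE_MAX_ALLOCATION_SIZE=1073741824 caps single
        // allocations at 1 GiB.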
        const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");
        if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) {
            device->max_memory_allocation_size = std::stoull(GGML_VK_FORCE_MAX_ALLOCATION_SIZE);
        } else if (maintenance4_support) {
            device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
        } else {
            device->max_memory_allocation_size = props3.maxMemoryAllocationSize;
        }

        const char* GGML_VK_FORCE_MAX_BUFFER_SIZE = getenv("GGML_VK_FORCE_MAX_BUFFER_SIZE");
        if (GGML_VK_FORCE_MAX_BUFFER_SIZE != nullptr) {
            device->max_buffer_size = std::stoull(GGML_VK_FORCE_MAX_BUFFER_SIZE);
        } else if (maintenance4_support) {
            device->max_buffer_size = props4.maxBufferSize;
        } else {
            device->max_buffer_size = device->max_memory_allocation_size;
        }

        const char* GGML_VK_SUBALLOCATION_BLOCK_SIZE = getenv("GGML_VK_SUBALLOCATION_BLOCK_SIZE");
        if (GGML_VK_SUBALLOCATION_BLOCK_SIZE != nullptr) {
            device->suballocation_block_size = std::stoull(GGML_VK_SUBALLOCATION_BLOCK_SIZE);
        } else {
            // Limit batching of allocations to 1GB by default to avoid fragmentation issues
            device->suballocation_block_size = 1024*1024*1024;
        }
        device->suballocation_block_size = std::min(device->suballocation_block_size, device->max_memory_allocation_size);

        device->subgroup_size = subgroup_props.subgroupSize;
        device->subgroup_size_log2 = uint32_t(log2f(float(device->subgroup_size)));
        device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
        if (sm_builtins) {
            device->shader_core_count = sm_props.shaderSMCount;
        } else if (amd_shader_core_properties2) {
            device->shader_core_count = amd_shader_core_properties2_props.activeComputeUnitCount;
        } else {
            device->shader_core_count = 0;
        }
        device->float_controls_rte_fp16 = vk12_props.shaderRoundingModeRTEFloat16;
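        // Record which subgroup operations the compute stage supports; shader
        // variants are specialized on these capabilities later.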
        device->subgroup_arithmetic = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                      (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eArithmetic);
#ifdef __APPLE__
        // Workaround for subgroup arithmetic failing on MoltenVK with AMD GPUs (issue 15846)
        if (device->vendor_id == VK_VENDOR_ID_AMD) {
            device->subgroup_arithmetic = false;
        }
#endif
        device->subgroup_shuffle = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                   (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eShuffle);
        device->subgroup_clustered = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                     (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eClustered);
        device->subgroup_ballot = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                  (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eBallot);
        device->subgroup_vote = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
                                (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eVote);

        const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr;
        device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

        if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props, device->architecture)) {
            device->coopmat_support = false;
        }

        device->integer_dot_product = device->integer_dot_product && shader_integer_dot_product_props.integerDotProduct4x8BitPackedSignedAccelerated;

        device->max_workgroup_size_log2 = uint32_t(log2f(float(device->properties.limits.maxComputeWorkGroupInvocations)));
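        // Queue setup: prefer a compute-only family (no graphics bit) plus a
        // separate transfer-only family. If both searches land on the same
        // family and it exposes only one queue, compute and transfer share a
        // single queue (single_queue).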
        std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();

        // Try to find a non-graphics compute queue and transfer-focused queues
        const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
        const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);

        const float priorities[] = { 1.0f, 1.0f };
        device->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;

        std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
        if (compute_queue_family_index != transfer_queue_family_index) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
        } else if (!device->single_queue) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
        } else {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
        }
        vk::DeviceCreateInfo device_create_info;
        std::vector<const char *> device_extensions;
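        // Feature queries use the same pNext-chain pattern as the property
        // queries above. The filled chain is later attached to the
        // DeviceCreateInfo via setPNext(), so the device is created with
        // exactly the features that were queried.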
        vk::PhysicalDeviceFeatures device_features = device->physical_device.getFeatures();

        VkPhysicalDeviceFeatures2 device_features2;
        device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        device_features2.pNext = nullptr;
        device_features2.features = (VkPhysicalDeviceFeatures)device_features;

        VkPhysicalDeviceVulkan11Features vk11_features;
        vk11_features.pNext = nullptr;
        vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
        device_features2.pNext = &vk11_features;

        VkPhysicalDeviceVulkan12Features vk12_features;
        vk12_features.pNext = nullptr;
        vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
        vk11_features.pNext = &vk12_features;

        last_struct = (VkBaseOutStructure *)&vk12_features;

        VkPhysicalDevicePipelineRobustnessFeaturesEXT pl_robustness_features;
        pl_robustness_features.pNext = nullptr;
        pl_robustness_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT;
        pl_robustness_features.pipelineRobustness = VK_FALSE;

        if (pipeline_robustness) {
            last_struct->pNext = (VkBaseOutStructure *)&pl_robustness_features;
            last_struct = (VkBaseOutStructure *)&pl_robustness_features;
            device_extensions.push_back("VK_EXT_pipeline_robustness");
        }

        VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_size_control_features;
        subgroup_size_control_features.pNext = nullptr;
        subgroup_size_control_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
        subgroup_size_control_features.computeFullSubgroups = false;
        subgroup_size_control_features.subgroupSizeControl = false;

        if (device->subgroup_size_control) {
            last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_features;
            last_struct = (VkBaseOutStructure *)&subgroup_size_control_features;
        }

#if defined(VK_KHR_cooperative_matrix)
        VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
        coopmat_features.pNext = nullptr;
        coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
        coopmat_features.cooperativeMatrix = VK_FALSE;

        if (device->coopmat_support) {
            last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
            last_struct = (VkBaseOutStructure *)&coopmat_features;
        }
#endif

#if defined(VK_NV_cooperative_matrix2)
        VkPhysicalDeviceCooperativeMatrix2FeaturesNV coopmat2_features {};
        coopmat2_features.pNext = nullptr;
        coopmat2_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV;

        if (coopmat2_support) {
            last_struct->pNext = (VkBaseOutStructure *)&coopmat2_features;
            last_struct = (VkBaseOutStructure *)&coopmat2_features;
            device_extensions.push_back("VK_NV_cooperative_matrix2");
        }
#endif

#if defined(VK_KHR_shader_bfloat16)
        VkPhysicalDeviceShaderBfloat16FeaturesKHR bfloat16_features {};
        bfloat16_features.pNext = nullptr;
        bfloat16_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR;

        if (bfloat16_support) {
            last_struct->pNext = (VkBaseOutStructure *)&bfloat16_features;
            last_struct = (VkBaseOutStructure *)&bfloat16_features;
            device_extensions.push_back("VK_KHR_shader_bfloat16");
        }
#endif

        VkPhysicalDeviceMaintenance4Features maint4_features {};
        maint4_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES;
        if (maintenance4_support) {
            last_struct->pNext = (VkBaseOutStructure *)&maint4_features;
            last_struct = (VkBaseOutStructure *)&maint4_features;
            device_extensions.push_back("VK_KHR_maintenance4");
        }

        VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR shader_integer_dot_product_features {};
        shader_integer_dot_product_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR;
        if (device->integer_dot_product) {
            last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_features;
            last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_features;
            device_extensions.push_back("VK_KHR_shader_integer_dot_product");
        }

        VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR pep_features {};
        pep_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR;
        if (pipeline_executable_properties_support) {
            last_struct->pNext = (VkBaseOutStructure *)&pep_features;
            last_struct = (VkBaseOutStructure *)&pep_features;
            device_extensions.push_back("VK_KHR_pipeline_executable_properties");
        }

        vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);

        device->pipeline_executable_properties_support = pipeline_executable_properties_support;

        device->fp16 = device->fp16 && vk12_features.shaderFloat16;

#if defined(VK_KHR_shader_bfloat16)
        device->bf16 = bfloat16_support && bfloat16_features.shaderBFloat16Type;
#else
        device->bf16 = false;
#endif

        device->pipeline_robustness = pl_robustness_features.pipelineRobustness;

        device->multi_add = vk12_props.shaderRoundingModeRTEFloat16 &&
                            device->properties.limits.maxPushConstantsSize >= sizeof(vk_op_multi_add_push_constants) &&
                            getenv("GGML_VK_DISABLE_MULTI_ADD") == nullptr;

        device->shader_int64 = device_features2.features.shaderInt64;
        device->buffer_device_address = vk12_features.bufferDeviceAddress;
        device->vulkan_memory_model = vk12_features.vulkanMemoryModel;

        if (device->subgroup_size_control) {
            device->subgroup_min_size = subgroup_size_control_props.minSubgroupSize;
            device->subgroup_max_size = subgroup_size_control_props.maxSubgroupSize;
            device_extensions.push_back("VK_EXT_subgroup_size_control");
        }

        device->subgroup_size_control = device->subgroup_size_control &&
                (subgroup_size_control_props.requiredSubgroupSizeStages & vk::ShaderStageFlagBits::eCompute) &&
                subgroup_size_control_features.subgroupSizeControl;

        device->subgroup_require_full_support = subgroup_size_control_features.computeFullSubgroups;

#if defined(VK_KHR_cooperative_matrix)
        device->coopmat_support = device->coopmat_support && coopmat_features.cooperativeMatrix;

        // coopmat1 fa shader currently assumes 32 invocations per subgroup
        device->coopmat1_fa_support = device->coopmat_support && device->subgroup_require_full_support &&
                                      device->subgroup_size_control && device->subgroup_min_size <= 32 &&
                                      device->subgroup_max_size >= 32;
#endif
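        // NV_cooperative_matrix2 is only adopted when the implementation offers
        // workgroup-scope matrices with flexible dimensions (plus the feature
        // bits checked below). The query uses the usual Vulkan enumerate-twice
        // pattern: one call for the count, a second to fill the array.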
        if (coopmat2_support) {
#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
            if (coopmat2_features.cooperativeMatrixWorkgroupScope &&
                coopmat2_features.cooperativeMatrixFlexibleDimensions &&
                coopmat2_features.cooperativeMatrixReductions &&
                coopmat2_features.cooperativeMatrixConversions &&
                coopmat2_features.cooperativeMatrixPerElementOperations &&
                coopmat2_features.cooperativeMatrixTensorAddressing &&
                coopmat2_features.cooperativeMatrixBlockLoads &&
                vk12_features.bufferDeviceAddress) {

                std::vector<VkCooperativeMatrixFlexibleDimensionsPropertiesNV> flexible_dimensions;
                uint32_t count = 0;

                PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV
                    _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV =
                        (PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV)
                        vk_instance.instance.getProcAddr("vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV");

                _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, nullptr);

                VkCooperativeMatrixFlexibleDimensionsPropertiesNV empty_prop {};
                empty_prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_FLEXIBLE_DIMENSIONS_PROPERTIES_NV;
                flexible_dimensions.resize(count, empty_prop);

                _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, flexible_dimensions.data());

                bool found_fp16_128 = false,
                     found_fp16_256 = false,
                     found_fp32_128 = false,
                     found_fp32_256 = false;
                // Need fp16*fp16 with both fp16 and fp32 accumulators: at
                // workgroup size 128 with 32x16x16 granularity, and at
                // workgroup size 256 with 32x32x16 granularity.
                for (auto &prop : flexible_dimensions) {
                    if (prop.saturatingAccumulation == VK_FALSE &&
                        prop.scope == VK_SCOPE_WORKGROUP_KHR &&
                        prop.AType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
                        prop.BType == VK_COMPONENT_TYPE_FLOAT16_KHR) {

                        if (prop.workgroupInvocations == 128 &&
                            prop.MGranularity <= 32 &&
                            prop.NGranularity <= 16 &&
                            prop.KGranularity <= 16) {
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
                                found_fp16_128 = true;
                            }
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
                                found_fp32_128 = true;
                            }
                        }
                        if (prop.workgroupInvocations == 256 &&
                            prop.MGranularity <= 32 &&
                            prop.NGranularity <= 32 &&
                            prop.KGranularity <= 16) {
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
                                found_fp16_256 = true;
                            }
                            if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                                prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
                                found_fp32_256 = true;
                            }
                        }
                    }
                }
                if (found_fp16_128 && found_fp16_256 &&
                    found_fp32_128 && found_fp32_256 &&
                    coopmat2_props.cooperativeMatrixFlexibleDimensionsMaxDimension >= 512) {
                    device->coopmat2 = true;
                }
            }
#endif
        }

        if (!vk11_features.storageBuffer16BitAccess) {
            std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
            throw std::runtime_error("Unsupported device");
        }

        device_extensions.push_back("VK_KHR_16bit_storage");

#ifdef GGML_VULKAN_VALIDATE
        device_extensions.push_back("VK_KHR_shader_non_semantic_info");
#endif

        if (device->fp16) {
            device_extensions.push_back("VK_KHR_shader_float16_int8");
        }
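        // KHR_cooperative_matrix: enumerate the supported MxNxK shapes (count
        // call followed by a fill call) and adopt the first subgroup-scope fp16
        // shape found. Later shapes are only accepted when they match the first
        // one exactly, and fp32 accumulation must be available or matrix cores
        // are disabled outright.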
#if defined(VK_KHR_cooperative_matrix)
        if (device->coopmat_support) {
            // Query supported shapes
            std::vector<VkCooperativeMatrixPropertiesKHR> cm_props;

            PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR =
                (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)vkGetInstanceProcAddr(vk_instance.instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR");

            uint32_t cm_props_num;

            pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, nullptr);

            cm_props.resize(cm_props_num);

            for (auto& prop : cm_props) {
                prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR;
            }

            pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, cm_props.data());

            VK_LOG_DEBUG("ggml_vulkan: Cooperative Matrix Shapes: " << cm_props.size());

            for (auto& prop : cm_props) {
                VK_LOG_DEBUG("ggml_vulkan: M: " << prop.MSize << " N: " << prop.NSize << " K: " << prop.KSize << " A: " << vk::to_string((vk::ComponentTypeKHR)prop.AType) << " B: " << vk::to_string((vk::ComponentTypeKHR)prop.BType) << " C: " << vk::to_string((vk::ComponentTypeKHR)prop.CType) << " Result: " << vk::to_string((vk::ComponentTypeKHR)prop.ResultType) << " saturatingAccumulation: " << prop.saturatingAccumulation << " scope: " << vk::to_string((vk::ScopeKHR)prop.scope));

                if ((vk::ComponentTypeKHR)prop.AType == vk::ComponentTypeKHR::eFloat16 &&
                    (vk::ComponentTypeKHR)prop.BType == vk::ComponentTypeKHR::eFloat16 &&
                    (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup
                ) {
                    if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat32 &&
                        (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat32) {
                        // coopmat sizes not set yet
                        if (device->coopmat_m == 0) {
                            device->coopmat_acc_f32_support = true;
                            device->coopmat_m = prop.MSize;
                            device->coopmat_n = prop.NSize;
                            device->coopmat_k = prop.KSize;
                        } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
                            // Only enable if shape is identical
                            device->coopmat_acc_f32_support = true;
                        }
                        if (prop.MSize == 16 && prop.NSize == 16 && prop.KSize == 16) {
                            device->coopmat_support_16x16x16_f32acc = true;
                        }
                    } else if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat16 &&
                               (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat16) {
                        // coopmat sizes not set yet
                        if (device->coopmat_m == 0) {
                            device->coopmat_acc_f16_support = true;
                            device->coopmat_m = prop.MSize;
                            device->coopmat_n = prop.NSize;
                            device->coopmat_k = prop.KSize;
                        } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
                            // Only enable if shape is identical
                            device->coopmat_acc_f16_support = true;
                        }
                        if (prop.MSize == 16 && prop.NSize == 16 && prop.KSize == 16) {
                            device->coopmat_support_16x16x16_f16acc = true;
                        }
                    }
                } else if ((vk::ComponentTypeKHR)prop.AType == vk::ComponentTypeKHR::eSint8 &&
                           (vk::ComponentTypeKHR)prop.BType == vk::ComponentTypeKHR::eSint8 &&
                           (vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eSint32 &&
                           (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eSint32 &&
                           (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup &&
                           device->coopmat_int_m == 0
                ) {
                    device->coopmat_int_support = true;
                    device->coopmat_int_m = prop.MSize;
                    device->coopmat_int_n = prop.NSize;
                    device->coopmat_int_k = prop.KSize;
                }
#if defined(VK_KHR_shader_bfloat16) && defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
                if (prop.AType == VK_COMPONENT_TYPE_BFLOAT16_KHR &&
                    prop.BType == VK_COMPONENT_TYPE_BFLOAT16_KHR &&
                    prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                    prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
                    (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup
                ) {
                    // coopmat sizes not set yet
                    if (device->coopmat_m == 0) {
                        device->coopmat_bf16_support = true;
                        device->coopmat_m = prop.MSize;
                        device->coopmat_n = prop.NSize;
                        device->coopmat_k = prop.KSize;
                    } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
                        // Only enable if shape is identical
                        device->coopmat_bf16_support = true;
                    }
                }
#endif
            }

            if (device->coopmat_m == 0 || !device->coopmat_acc_f32_support) {
                // No suitable matmul mode found
                GGML_LOG_DEBUG("ggml_vulkan: WARNING: No suitable matrix core mode found. Disabling matrix cores.\n");
                device->coopmat_support = false;
            }
            if (getenv("GGML_VK_DISABLE_BFLOAT16")) {
                device->coopmat_bf16_support = false;
            }
        }

        if (device->coopmat_support) {
            device_extensions.push_back("VK_KHR_cooperative_matrix");
        }
#if defined(VK_KHR_shader_bfloat16)
        if (device->coopmat_bf16_support) {
            device_extensions.push_back("VK_KHR_shader_bfloat16");
        }
#endif
#endif
        device->name = GGML_VK_NAME + std::to_string(idx);

        device_create_info = {
            vk::DeviceCreateFlags(),
            device_queue_create_infos,
            {},
            device_extensions
        };
        device_create_info.setPNext(&device_features2);
        device->device = device->physical_device.createDevice(device_create_info);

        // Queues
        ggml_vk_create_queue(device, device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer }, false);

        // Shaders
        // Disable some matmul tile sizes up front where they are known to perform poorly or are unsupported
        for (uint32_t i = 0; i < GGML_TYPE_COUNT; ++i) {
            switch (device->vendor_id) {
#ifndef GGML_VULKAN_RUN_TESTS
            case VK_VENDOR_ID_AMD:
            case VK_VENDOR_ID_INTEL:
                device->mul_mat_l[i] = false;
                device->mul_mat_m[i] = true;
                device->mul_mat_s[i] = true;
                device->mul_mat_id_l[i] = false;
                device->mul_mat_id_m[i] = true;
                device->mul_mat_id_s[i] = true;
                break;
            case VK_VENDOR_ID_APPLE:
                device->mul_mat_l[i] = false;
                device->mul_mat_m[i] = true;
                device->mul_mat_s[i] = false;
                device->mul_mat_id_l[i] = false;
                device->mul_mat_id_m[i] = true;
                device->mul_mat_id_s[i] = false;
                break;
#endif
            default:
                device->mul_mat_l[i] = true;
                device->mul_mat_m[i] = true;
                device->mul_mat_s[i] = true;
                device->mul_mat_id_l[i] = true;
                device->mul_mat_id_m[i] = true;
                device->mul_mat_id_s[i] = true;
                break;
            }
        }
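        // A single descriptor set layout is shared by the compute pipelines:
        // MAX_PARAMETER_COUNT storage-buffer bindings, all visible to the
        // compute stage.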
        std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
        std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
        for (uint32_t i = 0; i < MAX_PARAMETER_COUNT; i++) {
            dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
            dsl_binding_flags.push_back({});
        }

        vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };

        vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
            {},
            dsl_binding);
        descriptor_set_layout_create_info.setPNext(&dslbfci);
        device->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info);

        ggml_vk_load_shaders(device);

        if (!device->single_queue) {
            const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
            ggml_vk_create_queue(device, device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer }, true);
        } else {
            // TODO: Use pointer or reference to avoid copy
            device->transfer_queue.copyFrom(device->compute_queue);
            device->transfer_queue.cmd_pool.init(device, &device->transfer_queue);
        }

        device->buffer_type = {
            /* .iface   = */ ggml_backend_vk_buffer_type_interface,
            /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), idx),
            /* .context = */ new ggml_backend_vk_buffer_type_context{ device->name, device },
        };

        device->fence = device->device.createFence({});

        device->idx = idx;

        device->disable_fusion = getenv("GGML_VK_DISABLE_FUSION") != nullptr;

        device->add_rms_fusion = !device->disable_fusion &&
                                 device->subgroup_arithmetic &&
                                 device->vendor_id != VK_VENDOR_ID_INTEL;
        device->partials_binding_alignment =
            std::max(4u, (uint32_t)device->properties.limits.minStorageBufferOffsetAlignment);

        device->mmvq_mode = 0;
        if (getenv("GGML_VK_DISABLE_MMVQ")) {
            device->mmvq_mode = -1;
        } else if (getenv("GGML_VK_FORCE_MMVQ")) {
            device->mmvq_mode = 1;
        }

        return device;
    }

    return vk_instance.devices[idx];
}

static void ggml_vk_print_gpu_info(size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
    VK_LOG_DEBUG("ggml_vk_print_gpu_info(" << dev_num << ")");
    GGML_ASSERT(vk_instance_initialized);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    if (dev_num >= devices.size()) {
        std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
        throw std::runtime_error("Device not found");
    }

    vk::PhysicalDevice physical_device = devices[dev_num];
    std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();

    bool fp16_storage = false;
    bool fp16_compute = false;
    bool coopmat_support = false;
    bool coopmat2_support = false;
    bool integer_dot_product = false;
    bool bfloat16_support = false;

    for (auto properties : ext_props) {
        if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
            fp16_storage = true;
        } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
            fp16_compute = true;
#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
        } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_COOPMAT")) {
            coopmat_support = true;
#endif
#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
        } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_COOPMAT2")) {
            coopmat2_support = true;
#endif
#if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
        } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_INTEGER_DOT_PRODUCT")) {
            integer_dot_product = true;
#endif
#if defined(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT)
        } else if (strcmp("VK_KHR_shader_bfloat16", properties.extensionName) == 0 &&
                   !getenv("GGML_VK_DISABLE_BFLOAT16")) {
            bfloat16_support = true;
#endif
        }
    }

    const vk_device_architecture device_architecture = get_device_architecture(physical_device);

    const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
    bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

    bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

    vk::PhysicalDeviceProperties2 props2;
    vk::PhysicalDeviceMaintenance3Properties props3;
    vk::PhysicalDeviceSubgroupProperties subgroup_props;
    vk::PhysicalDeviceDriverProperties driver_props;
    vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR shader_integer_dot_product_props;
    props2.pNext = &props3;
    props3.pNext = &subgroup_props;
    subgroup_props.pNext = &driver_props;

    // Pointer to the last chain element
    VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&driver_props;

    if (integer_dot_product) {
        last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_props;
        last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_props;
    }

    physical_device.getProperties2(&props2);

    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    device_features2.pNext = nullptr;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    VkPhysicalDeviceVulkan12Features vk12_features;
    vk12_features.pNext = nullptr;
    vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
    vk11_features.pNext = &vk12_features;

    // Pointer to the last chain element
    last_struct = (VkBaseOutStructure *)&vk12_features;

#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
    VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
    coopmat_features.pNext = nullptr;
    coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
    coopmat_features.cooperativeMatrix = VK_FALSE;

    if (coopmat_support) {
        last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
        last_struct = (VkBaseOutStructure *)&coopmat_features;
    }
#endif

    VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR shader_integer_dot_product_features {};
    shader_integer_dot_product_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR;
    if (integer_dot_product) {
        last_struct->pNext = (VkBaseOutStructure *)&shader_integer_dot_product_features;
        last_struct = (VkBaseOutStructure *)&shader_integer_dot_product_features;
    }

#if defined(VK_KHR_shader_bfloat16)
    VkPhysicalDeviceShaderBfloat16FeaturesKHR bfloat16_features {};
    bfloat16_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR;
    if (bfloat16_support) {
        last_struct->pNext = (VkBaseOutStructure *)&bfloat16_features;
        last_struct = (VkBaseOutStructure *)&bfloat16_features;
    }
#endif

    vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);

    fp16 = fp16 && vk12_features.shaderFloat16;

#if defined(VK_KHR_shader_bfloat16)
    bool bf16 = bfloat16_support && bfloat16_features.shaderBFloat16Type;
#else
    bool bf16 = false;
#endif

    uint32_t default_subgroup_size = get_subgroup_size("", device_architecture);
    const size_t subgroup_size = (default_subgroup_size != 0) ? default_subgroup_size : subgroup_props.subgroupSize;
    const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

    integer_dot_product = integer_dot_product
                       && shader_integer_dot_product_props.integerDotProduct4x8BitPackedSignedAccelerated
                       && shader_integer_dot_product_features.shaderIntegerDotProduct;

    coopmat_support = coopmat_support
#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
                   && coopmat_features.cooperativeMatrix
#endif
                   && ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props, device_architecture);

    std::string matrix_cores = coopmat2_support ? "NV_coopmat2" : coopmat_support ? "KHR_coopmat" : "none";

    std::string device_name = props2.properties.deviceName.data();
    GGML_LOG_DEBUG("ggml_vulkan: %zu = %s (%s) | uma: %d | fp16: %d | bf16: %d | warp size: %zu | shared memory: %d | int dot: %d | matrix cores: %s\n",
                   idx, device_name.c_str(), driver_props.driverName.data(), uma, fp16, bf16, subgroup_size,
                   props2.properties.limits.maxComputeSharedMemorySize, integer_dot_product, matrix_cores.c_str());

    if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
        GGML_LOG_DEBUG("ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want.\n");
    }
}

static bool ggml_vk_instance_validation_ext_available();
static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
static bool ggml_vk_instance_debug_utils_ext_available(const std::vector<vk::ExtensionProperties> & instance_extensions);
static bool ggml_vk_device_is_supported(const vk::PhysicalDevice & vkdev);

static DispatchLoaderDynamic ggml_vk_default_dispatcher_instance;
DispatchLoaderDynamic & ggml_vk_default_dispatcher() {
    return ggml_vk_default_dispatcher_instance;
}

static void ggml_vk_instance_init() {
    if (vk_instance_initialized) {
        return;
    }
    VK_LOG_DEBUG("ggml_vk_instance_init()");

    // See https://github.com/KhronosGroup/Vulkan-Hpp?tab=readme-ov-file#extensions--per-device-function-pointers-
    ggml_vk_default_dispatcher_instance.init(vkGetInstanceProcAddr);

    uint32_t api_version = vk::enumerateInstanceVersion();

    if (api_version < VK_API_VERSION_1_2) {
        std::cerr << "ggml_vulkan: Error: Vulkan 1.2 required." << std::endl;
        throw vk::SystemError(vk::Result::eErrorFeatureNotPresent, "Vulkan 1.2 required");
    }

    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, api_version };

    const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
    const bool validation_ext = ggml_vk_instance_validation_ext_available();
#ifdef __APPLE__
    const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
#endif
    const bool debug_utils_ext = ggml_vk_instance_debug_utils_ext_available(instance_extensions) && getenv("GGML_VK_DEBUG_MARKERS") != nullptr;
    std::vector<const char*> layers;

    if (validation_ext) {
        layers.push_back("VK_LAYER_KHRONOS_validation");
    }
    std::vector<const char*> extensions;
    if (validation_ext) {
        extensions.push_back("VK_EXT_validation_features");
    }
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        extensions.push_back("VK_KHR_portability_enumeration");
    }
#endif
    if (debug_utils_ext) {
        extensions.push_back("VK_EXT_debug_utils");
    }
    vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
    }
#endif

    std::vector<vk::ValidationFeatureEnableEXT> features_enable;
    vk::ValidationFeaturesEXT validation_features;

    if (validation_ext) {
        features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
        validation_features = {
            features_enable,
            {},
        };
        validation_features.setPNext(nullptr);
        instance_create_info.setPNext(&validation_features);
        GGML_LOG_DEBUG("ggml_vulkan: Validation layers enabled\n");
    }
    vk_instance.instance = vk::createInstance(instance_create_info);
    vk_instance_initialized = true;

    if (debug_utils_ext) {
        vk_instance.debug_utils_support = true;
        vk_instance.pfn_vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkSetDebugUtilsObjectNameEXT");
        vk_instance.pfn_vkQueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkQueueBeginDebugUtilsLabelEXT");
        vk_instance.pfn_vkQueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkQueueEndDebugUtilsLabelEXT");
        vk_instance.pfn_vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdBeginDebugUtilsLabelEXT");
        vk_instance.pfn_vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdEndDebugUtilsLabelEXT");
        vk_instance.pfn_vkCmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdInsertDebugUtilsLabelEXT");
    }

    vk_perf_logger_enabled = getenv("GGML_VK_PERF_LOGGER") != nullptr;

    // See https://github.com/KhronosGroup/Vulkan-Hpp?tab=readme-ov-file#extensions--per-device-function-pointers-
    VULKAN_HPP_DEFAULT_DISPATCHER.init(vk_instance.instance);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
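    // The value is a comma-separated list of indices into the list returned by
    // enumeratePhysicalDevices(), e.g. GGML_VK_VISIBLE_DEVICES=0,2 exposes only
    // the first and third physical devices.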
    char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
    if (devices_env != nullptr) {
        size_t num_available_devices = devices.size();

        std::string devices(devices_env);
        std::replace(devices.begin(), devices.end(), ',', ' ');

        std::stringstream ss(devices);
        size_t tmp;
        while (ss >> tmp) {
            if (tmp >= num_available_devices) {
                std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
                throw std::runtime_error("Invalid Vulkan device index");
            }
            vk_instance.device_indices.push_back(tmp);
        }
    } else {
        // If no vulkan devices are found, return early
        if (devices.empty()) {
            GGML_LOG_INFO("ggml_vulkan: No devices found.\n");
            return;
        }

        // Default to using all dedicated GPUs
        for (size_t i = 0; i < devices.size(); i++) {
            vk::PhysicalDeviceProperties2 new_props;
            vk::PhysicalDeviceDriverProperties new_driver;
            vk::PhysicalDeviceIDProperties new_id;
            new_props.pNext = &new_driver;
            new_driver.pNext = &new_id;
            devices[i].getProperties2(&new_props);

            if ((new_props.properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu || new_props.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu) && ggml_vk_device_is_supported(devices[i])) {
                // Check if there are two physical devices corresponding to the same GPU
                auto old_device = std::find_if(
                    vk_instance.device_indices.begin(),
                    vk_instance.device_indices.end(),
                    [&devices, &new_id](const size_t k){
                        vk::PhysicalDeviceProperties2 old_props;
                        vk::PhysicalDeviceIDProperties old_id;
                        old_props.pNext = &old_id;
                        devices[k].getProperties2(&old_props);

                        bool equals = std::equal(std::begin(old_id.deviceUUID), std::end(old_id.deviceUUID), std::begin(new_id.deviceUUID));
                        equals = equals || (
                            old_id.deviceLUIDValid && new_id.deviceLUIDValid &&
                            std::equal(std::begin(old_id.deviceLUID), std::end(old_id.deviceLUID), std::begin(new_id.deviceLUID))
                        );

                        return equals;
                    }
                );
                if (old_device == vk_instance.device_indices.end()) {
                    vk_instance.device_indices.push_back(i);
                } else {
                    // There can be two physical devices corresponding to the same GPU if two different drivers are installed.
                    // This can cause errors when splitting layers across the devices, so keep only one of them.
                    VK_LOG_DEBUG("Device " << i << " and device " << *old_device << " have the same deviceUUID");

                    vk::PhysicalDeviceProperties2 old_props;
                    vk::PhysicalDeviceDriverProperties old_driver;
                    old_props.pNext = &old_driver;
                    devices[*old_device].getProperties2(&old_props);

                    std::map<vk::DriverId, int> driver_priorities {};
                    int old_priority = std::numeric_limits<int>::max();
                    int new_priority = std::numeric_limits<int>::max();

                    // Check https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDriverId.html for the list of driver id
                    // Smaller number -> higher priority
                    switch (old_props.properties.vendorID) {
                        case VK_VENDOR_ID_AMD:
                            driver_priorities[vk::DriverId::eMesaRadv] = 1;
                            driver_priorities[vk::DriverId::eAmdOpenSource] = 2;
                            driver_priorities[vk::DriverId::eAmdProprietary] = 3;
                            break;
                        case VK_VENDOR_ID_INTEL:
                            driver_priorities[vk::DriverId::eIntelOpenSourceMESA] = 1;
                            driver_priorities[vk::DriverId::eIntelProprietaryWindows] = 2;
                            break;
                        case VK_VENDOR_ID_NVIDIA:
                            driver_priorities[vk::DriverId::eNvidiaProprietary] = 1;
#if defined(VK_API_VERSION_1_3) && VK_HEADER_VERSION >= 235
                            driver_priorities[vk::DriverId::eMesaNvk] = 2;
#endif
                            break;
                    }
                    driver_priorities[vk::DriverId::eMesaDozen] = 100;

                    if (driver_priorities.count(old_driver.driverID)) {
                        old_priority = driver_priorities[old_driver.driverID];
                    }
                    if (driver_priorities.count(new_driver.driverID)) {
                        new_priority = driver_priorities[new_driver.driverID];
                    }

                    if (new_priority < old_priority) {
                        auto r = std::remove(vk_instance.device_indices.begin(), vk_instance.device_indices.end(), *old_device);
                        vk_instance.device_indices.erase(r, vk_instance.device_indices.end());
                        vk_instance.device_indices.push_back(i);

                        VK_LOG_DEBUG("Prioritize device " << i << " driver " << new_driver.driverName << " over device " << *old_device << " driver " << old_driver.driverName);
                    } else {
                        VK_LOG_DEBUG("Prioritize device " << *old_device << " driver " << old_driver.driverName << " over device " << i << " driver " << new_driver.driverName << std::endl);
                    }
                }
            }
        }

        // If no GPUs found, fall back to the first non-CPU device.
        // If only CPU devices are available, return without devices.
        if (vk_instance.device_indices.empty()) {
            for (size_t i = 0; i < devices.size(); i++) {
                if (devices[i].getProperties().deviceType != vk::PhysicalDeviceType::eCpu) {
                    vk_instance.device_indices.push_back(i);
                    break;
                }
            }
        }

        if (vk_instance.device_indices.empty()) {
            GGML_LOG_INFO("ggml_vulkan: No devices found.\n");
            return;
        }
    }
    GGML_LOG_DEBUG("ggml_vulkan: Found %zu Vulkan devices:\n", vk_instance.device_indices.size());

    for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
        vk::PhysicalDevice vkdev = devices[vk_instance.device_indices[i]];
        std::vector<vk::ExtensionProperties> extensionprops = vkdev.enumerateDeviceExtensionProperties();

        bool membudget_supported = false;
        for (const auto & ext : extensionprops) {
            if (strcmp(VK_EXT_MEMORY_BUDGET_EXTENSION_NAME, ext.extensionName) == 0) {
                membudget_supported = true;
                break;
            }
        }

        vk_instance.device_supports_membudget.push_back(membudget_supported);

        ggml_vk_print_gpu_info(i);
    }
}

static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
    VK_LOG_DEBUG("ggml_vk_init(" << ctx->name << ", " << idx << ")");
    ggml_vk_instance_init();

    GGML_ASSERT(idx < vk_instance.device_indices.size());
    ctx->name = GGML_VK_NAME + std::to_string(idx);
    ctx->device = ggml_vk_get_device(idx);

    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;
    // Fixed size of 1KB, for deterministic behavior
    ctx->prealloc_size_add_rms_partials = 1024;

    ctx->fence = ctx->device->device.createFence({});
    ctx->almost_ready_fence = ctx->device->device.createFence({});

    ctx->compute_cmd_pool.init(ctx->device, &ctx->device->compute_queue);
    ctx->transfer_cmd_pool.init(ctx->device, &ctx->device->transfer_queue);

#ifdef GGML_VULKAN_CHECK_RESULTS
    const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
    vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
    const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
    vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
#endif
}
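// Look up the pipeline that converts the given tensor type to fp16; returns
// nullptr for types that have no such pipeline.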
static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
    VK_LOG_DEBUG("ggml_vk_get_to_fp16()");
    switch (type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant[type];
}

static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline(" << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ", " << prec << ")");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f32;
    }
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f32_f16;
    }
    if (src0_type == GGML_TYPE_BF16 && src1_type == GGML_TYPE_BF16) {
        return ctx->device->pipeline_matmul_bf16;
    }
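    // fp16 accumulation is only used when the caller requests default
    // precision, the device has usable fp16, and KHR_coopmat (when active)
    // supports an fp16 accumulator; otherwise the f32-accumulator variants are
    // selected.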
    if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_f16_f32.f16acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_f16.f16acc;
        }
    } else {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_f16_f32.f32acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_f16.f32acc;
        }
    }

    // MMQ
    if (src1_type == GGML_TYPE_Q8_1) {
        vk_matmul_pipeline pipelines = ctx->device->pipeline_dequant_mul_mat_mat_q8_1[src0_type].f32acc;

        if (pipelines->is_empty()) {
            return nullptr;
        }

        return pipelines;
    }

    if (src1_type != GGML_TYPE_F32 && !ctx->device->coopmat2) {
        return nullptr;
    }

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    if (ctx->device->coopmat2) {
        assert(src1_type == GGML_TYPE_F16);
        return prec == GGML_PREC_DEFAULT ? ctx->device->pipeline_dequant_mul_mat_mat_f16[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat_f16[src0_type].f32acc;
    }
    if (ctx->device->coopmat_support) {
        return (ctx->device->fp16 && ctx->device->coopmat_acc_f16_support && prec == GGML_PREC_DEFAULT) ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc;
    }
    return (ctx->device->fp16 && prec == GGML_PREC_DEFAULT) ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc;
}
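// Select a dequantize+mat-vec pipeline keyed on the (a, b) type pair, the
// number of columns in b, and a per-vendor workgroup-size heuristic.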
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type, uint32_t num_cols, uint32_t m, uint32_t k) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()");
    GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16 || b_type == GGML_TYPE_Q8_1);
    GGML_ASSERT(num_cols >= 1 && num_cols <= mul_mat_vec_max_cols);

    if (b_type == GGML_TYPE_Q8_1) {
        switch (a_type) {
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
                break;
            default:
                return nullptr;
        }
    }

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    // heuristic to choose workgroup size
    uint32_t dmmv_wg = DMMV_WG_SIZE_SUBGROUP;
    if ((ctx->device->vendor_id == VK_VENDOR_ID_NVIDIA && ctx->device->architecture != vk_device_architecture::NVIDIA_PRE_TURING) || ctx->device->vendor_id == VK_VENDOR_ID_INTEL) {
        // Prefer larger workgroups when M is small, to spread the work out more
        // and keep more SMs busy.
        // q6_k seems to prefer small workgroup size even for "medium" values of M.
        if (a_type == GGML_TYPE_Q6_K) {
            if (m < 4096 && k >= 1024) {
                dmmv_wg = DMMV_WG_SIZE_LARGE;
            }
        } else {
            if (m <= 8192 && k >= 1024) {
                dmmv_wg = DMMV_WG_SIZE_LARGE;
            }
        }
    }

    if (b_type == GGML_TYPE_Q8_1) {
        if (ctx->device->vendor_id == VK_VENDOR_ID_INTEL) {
            dmmv_wg = DMMV_WG_SIZE_SUBGROUP;
        }
        return ctx->device->pipeline_dequant_mul_mat_vec_q8_1_f32[dmmv_wg][a_type][num_cols-1];
    }

    return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[dmmv_wg][a_type][num_cols-1] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[dmmv_wg][a_type][num_cols-1];
}
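// Worked example of the workgroup-size heuristic above (illustrative numbers):
// on an NVIDIA Turing-or-newer device, a Q6_K matrix with m = 2048 and k = 4096
// satisfies m < 4096 && k >= 1024, so DMMV_WG_SIZE_LARGE is chosen; the same
// shape with a Q4_0 matrix also takes the large path via m <= 8192 && k >= 1024.
// For b_type == Q8_1 on Intel, the choice is forced back to DMMV_WG_SIZE_SUBGROUP.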
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f32;
    }
    if (src0_type == GGML_TYPE_BF16 && src1_type == GGML_TYPE_BF16) {
        return ctx->device->pipeline_matmul_id_bf16;
    }
    if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_id_f16_f32.f16acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_id_f16.f16acc;
        }
    } else {
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
            return ctx->device->pipeline_matmul_id_f16_f32.f32acc;
        }
        if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
            return ctx->device->pipeline_matmul_id_f16.f32acc;
        }
    }

    // MMQ
    if (src1_type == GGML_TYPE_Q8_1) {
        vk_matmul_pipeline pipelines = ctx->device->pipeline_dequant_mul_mat_mat_id_q8_1[src0_type].f32acc;

        if (pipelines->is_empty()) {
            return nullptr;
        }

        return pipelines;
    }

    GGML_ASSERT(src1_type == GGML_TYPE_F32 || (ctx->device->coopmat2 && src1_type == GGML_TYPE_F16));

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    vk_matmul_pipeline2& mmp = ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type];
    // XXX TODO 'prec' is not actually allowed in mul_mat_id.
    bool prefer_fp16acc = ctx->device->fp16 /*&& prec == GGML_PREC_DEFAULT*/;
    bool support_fp16acc = !mmp.f16acc->is_empty();
    bool support_fp32acc = !mmp.f32acc->is_empty();

    if (support_fp16acc && (prefer_fp16acc || !support_fp32acc)) {
        return mmp.f16acc;
    } else {
        GGML_ASSERT(support_fp32acc);
        return mmp.f32acc;
    }
}
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec_id()");
    GGML_ASSERT(b_type == GGML_TYPE_F32);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_MXFP4:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_vec_id_f32[a_type];
}
static void * ggml_vk_host_malloc(vk_device& device, size_t size) {
    VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")");
    vk_buffer buf = ggml_vk_create_buffer(device, size,
        {vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
         vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent});

    if (!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
            size/1024.0/1024.0);
        device->device.freeMemory(buf->device_memory);
        device->device.destroyBuffer(buf->buffer);
        return nullptr;
    }

    std::lock_guard<std::recursive_mutex> guard(device->mutex);
    device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));

    return buf->ptr;
}

static void ggml_vk_host_free(vk_device& device, void* ptr) {
    if (ptr == nullptr) {
        return;
    }
    VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
    std::lock_guard<std::recursive_mutex> guard(device->mutex);

    vk_buffer buf;
    size_t index;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            index = i;
            break;
        }
    }
    if (buf == nullptr) {
        fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
        return;
    }

    ggml_vk_destroy_buffer(buf);

    device->pinned_memory.erase(device->pinned_memory.begin() + index);
}
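// Usage sketch (illustrative only, not called from here): pinned host memory
// from ggml_vk_host_malloc() doubles as a zero-copy staging area, because the
// async transfer paths resolve raw host pointers back to the backing vk_buffer
// via ggml_vk_host_get():
//
//     void * host = ggml_vk_host_malloc(device, nbytes);
//     if (host != nullptr) {
//         // fill `host`, run transfers that reference it, then release it:
//         ggml_vk_host_free(device, host);
//     }  // on failure `host` is nullptr and a regular allocation must be used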
static void ggml_vk_host_get(const vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
    std::lock_guard<std::recursive_mutex> guard(device->mutex);
    buf = nullptr;
    buf_offset = 0;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            buf_offset = ((const uint8_t *)ptr) - addr;
            break;
        }
    }
}
static vk_subbuffer ggml_vk_tensor_subbuffer(
        const ggml_backend_vk_context * ctx, const ggml_tensor * tensor, bool allow_misalign = false) {

    vk_buffer buffer = nullptr;
    size_t offset = 0;
    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, tensor->data, buffer, offset);
    }
    if (!buffer) {
        auto buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
        buffer = buf_ctx->dev_buffer;
        offset = vk_tensor_offset(tensor) + tensor->view_offs;
    }
    GGML_ASSERT(buffer != nullptr);

    size_t size = ggml_nbytes(tensor);

    size_t misalign_bytes = offset & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
    // The shader must support misaligned offsets when indexing into the buffer
    GGML_ASSERT(allow_misalign || misalign_bytes == 0);
    offset &= ~misalign_bytes;
    size += misalign_bytes;

    return vk_subbuffer{buffer, offset, size};
}
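// Worked example of the misalignment handling above: with
// minStorageBufferOffsetAlignment == 64 and offset == 100,
// misalign_bytes = 100 & 63 = 36, so the subbuffer starts at
// 100 & ~36 = 64 and its size grows by 36 bytes; the shader is then
// expected to re-apply the 36-byte offset when indexing (hence the
// allow_misalign assertion).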
static vk_submission ggml_vk_begin_submission(vk_device& device, vk_command_pool& p, bool one_time = true) {
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(device, p);
    if (one_time) {
        s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
    } else {
        s.buffer.begin({ vk::CommandBufferUsageFlags{} });
    }

    return s;
}

template <typename T> size_t push_constant_size(const T &t) {
    static_assert(std::is_class<T>::value, "T must be a struct/class");
    GGML_UNUSED(t);
    return sizeof(T);
}
template <typename T> size_t push_constant_size(const std::vector<T> &t) {
    GGML_UNUSED(t);
    return sizeof(T) * t.size();
}
template <typename T, uint32_t N> size_t push_constant_size(const std::array<T, N> &t) {
    GGML_UNUSED(t);
    return sizeof(T) * N;
}

template <typename T> const T *push_constant_data(const T &t) {
    static_assert(std::is_class<T>::value, "T must be a struct/class");
    return &t;
}
template <typename T> const T *push_constant_data(const std::vector<T> &t) {
    return t.data();
}
template <typename T, uint32_t N> const T *push_constant_data(const std::array<T, N> &t) {
    return t.data();
}
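// The push_constant_size/push_constant_data overload pairs above let
// ggml_vk_dispatch_pipeline() accept either a plain struct or a
// std::vector/std::array of values, e.g. (illustrative):
//
//     const std::array<uint32_t, 2> pc = { count, split_k };
//     // forwards sizeof(uint32_t) * 2 and pc.data() to pushConstants()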
template <typename T>
static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& subctx, vk_pipeline& pipeline, std::initializer_list<vk::DescriptorBufferInfo> const& descriptor_buffer_infos, const T &push_constants, std::array<uint32_t, 3> elements) {
    const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
    const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
    const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
    VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
    for (auto& buffer : descriptor_buffer_infos) {
        std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.range << "), ";
    }
    std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
    GGML_ASSERT(ctx->descriptor_set_idx < ctx->descriptor_sets.size());
    GGML_ASSERT(descriptor_buffer_infos.size() <= MAX_PARAMETER_COUNT);
    GGML_ASSERT(pipeline->parameter_count == descriptor_buffer_infos.size());

    vk::DescriptorSet& descriptor_set = ctx->descriptor_sets[ctx->descriptor_set_idx++];
    vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() };
    ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {});

    subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size(push_constants), push_constant_data(push_constants));
    subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
    subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
                                         pipeline->layout,
                                         0,
                                         { descriptor_set },
                                         {});
    subctx->s->buffer.dispatch(wg0, wg1, wg2);
}
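// Dispatch-size example for the CEIL_DIV above (illustrative): a pipeline with
// wg_denoms = {128, 128, 1} asked to cover elements = {300, 300, 2} records
// dispatch(3, 3, 2), i.e. each workgroup covers one 128x128 tile; the partially
// covered edge tiles are presumably handled by bounds checks inside the shader.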
static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    s.buffer.end();

    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
}

static void ggml_vk_ctx_end(vk_context& ctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")");
    if (ctx->s == nullptr) {
        return;
    }

    ctx->s->buffer.end();
    ctx->s = nullptr;
}

static void ggml_vk_ctx_begin(vk_device& device, vk_context& subctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_begin(" << device->name << ")");
    if (subctx->s != nullptr) {
        ggml_vk_ctx_end(subctx);
    }

    subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->p) });
    subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
}

static size_t ggml_vk_align_size(size_t width, size_t align) {
    VK_LOG_DEBUG("ggml_vk_align_size(" << width << ", " << align << ")");
    return CEIL_DIV(width, align) * align;
}
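// e.g. ggml_vk_align_size(100, 64) == CEIL_DIV(100, 64) * 64 == 128.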
static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
    if (memcpys == nullptr) {
        memcpy(dst, src, size);
    } else {
        memcpys->emplace_back(dst, src, size);
    }
}

static void deferred_memset(void * dst, uint32_t val, size_t size, std::vector<vk_staging_memset>* memsets = nullptr) {
    if (memsets == nullptr) {
        memset(dst, val, size);
    } else {
        memsets->emplace_back(dst, val, size);
    }
}
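// The deferred variants above queue host-side copies/fills instead of running
// them immediately; callers replay them at the right point relative to command
// submission (see ggml_vk_buffer_write_2d below, which flushes in_memcpys and
// memsets after recording but before submitting).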
static void ggml_vk_ensure_sync_staging_buffer(vk_device& device, size_t size) {
    if (device->sync_staging == nullptr || device->sync_staging->size < size) {
        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
        ggml_vk_destroy_buffer(device->sync_staging);
        device->sync_staging = ggml_vk_create_buffer_check(device, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}

static void ggml_vk_ensure_sync_staging_buffer(ggml_backend_vk_context * ctx, size_t size) {
    if (ctx->sync_staging == nullptr || ctx->sync_staging->size < size) {
        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
        ggml_vk_destroy_buffer(ctx->sync_staging);
        ctx->sync_staging = ggml_vk_create_buffer_check(ctx->device, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}
static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context& subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
    GGML_ASSERT(!ggml_is_contiguous(tensor));
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset = 0;
    ggml_vk_host_get(ctx->device, tensor->data, buf, buf_offset);

    const uint64_t ne0 = tensor->ne[0];
    const uint64_t ne1 = tensor->ne[1];
    const uint64_t ne2 = tensor->ne[2];
    const uint64_t ne3 = tensor->ne[3];
    const uint64_t nb0 = tensor->nb[0];
    const uint64_t nb1 = tensor->nb[1];
    const uint64_t nb2 = tensor->nb[2];
    const uint64_t nb3 = tensor->nb[3];
    const ggml_type type = tensor->type;
    const uint64_t ts = ggml_type_size(type);
    const uint64_t bs = ggml_blck_size(type);

    const uint64_t dstnb0 = ts;
    const uint64_t dstnb1 = dstnb0*(ne0/bs);
    const uint64_t dstnb2 = dstnb1*ne1;
    const uint64_t dstnb3 = dstnb2*ne2;

    const uint64_t ne = ggml_nelements(tensor);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices;

        for (uint64_t i3 = 0; i3 < ne3; i3++) {
            for (uint64_t i2 = 0; i2 < ne2; i2++) {
                // Find longest contiguous slice
                if (ne1*nb1 == dstnb2) {
                    slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
                } else {
                    for (uint64_t i1 = 0; i1 < ne1; i1++) {
                        if (ne0*nb0/bs == dstnb1) {
                            slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
                        } else {
                            const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                            const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                            for (uint64_t i0 = 0; i0 < ne0; i0++) {
                                slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 });
                            }
                        }
                    }
                }
            }
        }

        ggml_vk_sync_buffers(ctx, subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }

    if (!sync_staging) {
        GGML_ABORT("Asynchronous write to non-pinned memory not supported");
    }

    // Staging buffer required
    vk_buffer& staging = ctx->device->sync_staging;
    const uint64_t copy_size = ts*ne/bs;
    ggml_vk_ensure_sync_staging_buffer(ctx->device, copy_size);

    VkBufferCopy buf_copy{ 0, offset, copy_size };

    ggml_vk_sync_buffers(ctx, subctx);
    vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging->buffer, (VkBuffer)dst->buffer, 1, &buf_copy);

    for (uint64_t i3 = 0; i3 < ne3; i3++) {
        for (uint64_t i2 = 0; i2 < ne2; i2++) {
            // Find longest contiguous slice
            if (ne1*nb1 == dstnb2) {
                deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
            } else {
                for (uint64_t i1 = 0; i1 < ne1; i1++) {
                    if (ne0*nb0/bs == dstnb1) {
                        deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
                    } else {
                        const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                        const uint64_t d_off = i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                        for (uint64_t i0 = 0; i0 < ne0; i0++) {
                            deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
                        }
                    }
                }
            }
        }
    }
}
static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset = 0;
    ggml_vk_host_get(dst->device, src, buf, buf_offset);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices(1);
        if (width == spitch) {
            // Only do single write if stride is equal
            slices[0].srcOffset = buf_offset;
            slices[0].dstOffset = offset;
            slices[0].size = width * height;
        } else {
            slices.resize(height);
            for (size_t i = 0; i < height; i++) {
                slices[i].srcOffset = buf_offset + i * spitch;
                slices[i].dstOffset = offset + i * width;
                slices[i].size = width;
            }
        }

        ggml_vk_sync_buffers(nullptr, subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    if (!sync_staging) {
        GGML_ABORT("Asynchronous write to non-pinned memory not supported");
    }

    // Staging buffer required
    const size_t copy_size = width*height;
    ggml_vk_ensure_sync_staging_buffer(dst->device, copy_size);

    vk_buffer& staging_buffer = dst->device->sync_staging;

    VkBufferCopy buf_copy = {
        0,
        offset,
        copy_size};

    ggml_vk_sync_buffers(nullptr, subctx);
    vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging_buffer->buffer, (VkBuffer)dst->buffer, 1, &buf_copy);

    if (width == spitch) {
        deferred_memcpy((uint8_t *)staging_buffer->ptr, src, width * height, &subctx->in_memcpys);
    } else {
        for (size_t i = 0; i < height; i++) {
            deferred_memcpy((uint8_t *)staging_buffer->ptr + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
        }
    }
}
static void ggml_vk_buffer_write_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_async(" << size << ")");
    return ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, size, size, 1, sync_staging);
}

static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        for (size_t i = 0; i < height; i++) {
            memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
        }
    } else {
        std::lock_guard<std::recursive_mutex> guard(dst->device->mutex);

        vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool);
        ggml_vk_ctx_begin(dst->device, subctx);
        ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, spitch, width, height, true);
        ggml_vk_ctx_end(subctx);

        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        for (auto& mset : subctx->memsets) {
            memset(mset.dst, mset.val, mset.n);
        }

        ggml_vk_submit(subctx, dst->device->fence);
        VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
        dst->device->device.resetFences({ dst->device->fence });
        ggml_vk_queue_command_pools_cleanup(dst->device);
    }
}

static void ggml_vk_buffer_write(vk_buffer& dst, size_t offset, const void * src, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_write(" << size << ")");
    ggml_vk_buffer_write_2d(dst, offset, src, 0, size, 1);
}
static bool ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
    GGML_ASSERT(width > 0);
    GGML_ASSERT(height > 0);
    GGML_ASSERT(src != nullptr);

    // TODO: staging_offset is not used

    // Check if dst is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset = 0;
    ggml_vk_host_get(src->device, dst, buf, buf_offset);

    std::vector<vk::BufferCopy> slices(1);
    if (width == spitch && width == dpitch) {
        // Only do single write if stride is equal
        slices[0].srcOffset = offset;
        slices[0].dstOffset = buf_offset;
        slices[0].size = width * height;
    } else {
        slices.resize(height);
        for (size_t i = 0; i < height; i++) {
            slices[i].srcOffset = offset + i * spitch;
            slices[i].dstOffset = buf_offset + i * dpitch;
            slices[i].size = width;
        }
    }

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        ggml_vk_sync_buffers(nullptr, subctx);
        subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
        return true;
    }
    VK_LOG_DEBUG("STAGING");

    if (!sync_staging) {
        // The copy was not handled; the caller needs to fall back.
        return false;
    }

    // Fall back to staging buffer
    const size_t copy_size = dpitch * height;
    ggml_vk_ensure_sync_staging_buffer(src->device, copy_size);

    vk_buffer& staging_buffer = src->device->sync_staging;

    ggml_vk_sync_buffers(nullptr, subctx);
    subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices);

    deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys);
    return true;
}
static bool ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
    return ggml_vk_buffer_read_2d_async(subctx, src, offset, dst, size, size, size, 1, sync_staging);
}

static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_read(" << src->buffer << ", " << offset << ", " << size << ")");

    // If the device is not a UMA device, the memory is host-accessible through ReBAR.
    // While writing through PCIe is sufficiently fast, reading data back over PCIe is
    // slower than going through the hardware device-to-host copy path.
    if (src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible && src->device->uma) {
        GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        memcpy(dst, (uint8_t *) src->ptr + offset, size);
    } else {
        std::lock_guard<std::recursive_mutex> guard(src->device->mutex);

        vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
        ggml_vk_ctx_begin(src->device, subctx);
        bool ret = ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true);
        GGML_ASSERT(ret);
        ggml_vk_ctx_end(subctx);

        ggml_vk_submit(subctx, src->device->fence);
        VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
        src->device->device.resetFences({ src->device->fence });
        ggml_vk_queue_command_pools_cleanup(src->device);

        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
    }
}
static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_copy_async(" << size << ")");
    // Make sure both buffers are on same device
    GGML_ASSERT(src->device == dst->device);

    VkBufferCopy bc{ src_offset, dst_offset, size };

    vkCmdCopyBuffer(ctx->s->buffer, (VkBuffer)src->buffer, (VkBuffer)dst->buffer, 1, &bc);
}

static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    if (src->device == dst->device) {
        std::lock_guard<std::recursive_mutex> guard(src->device->mutex);
        VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
        // Copy within the device
        vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
        ggml_vk_ctx_begin(src->device, subctx);
        ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
        ggml_vk_ctx_end(subctx);
        ggml_vk_submit(subctx, src->device->fence);
        VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
        src->device->device.resetFences({ src->device->fence });
        ggml_vk_queue_command_pools_cleanup(src->device);
    } else {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")");
        // Copy device to device
        ggml_vk_ensure_sync_staging_buffer(src->device, size);

        // Copy to src staging buffer
        ggml_vk_buffer_copy(src->device->sync_staging, 0, src, src_offset, size);
        // Copy to dst buffer
        ggml_vk_buffer_write_2d(dst, dst_offset, src->device->sync_staging->ptr, 0, size, 1);
    }
}
static void ggml_vk_buffer_memset_async(vk_context& ctx, vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_memset_async(" << offset << ", " << c << ", " << size << ")");

    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible &&
        dst->device->uma) {
        deferred_memset((uint8_t*)dst->ptr + offset, c, size, &ctx->memsets);
        return;
    }

    // Fall back to GPU fillBuffer for non-UMA or non-host-visible buffers
    ctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
}

static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");

    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible &&
        dst->device->uma) {
        memset((uint8_t*)dst->ptr + offset, c, size);
        return;
    }

    std::lock_guard<std::recursive_mutex> guard(dst->device->mutex);
    vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool);
    ggml_vk_ctx_begin(dst->device, subctx);
    subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
    ggml_vk_ctx_end(subctx);

    ggml_vk_submit(subctx, dst->device->fence);
    VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_memset waitForFences");
    dst->device->device.resetFences({ dst->device->fence });
    ggml_vk_queue_command_pools_cleanup(dst->device);
}
static uint32_t ggml_vk_guess_split_k(ggml_backend_vk_context * ctx, uint32_t m, uint32_t n, uint32_t k, bool disable_split_k, const vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ", " << disable_split_k << ")");

    if (disable_split_k) {
        return 1;
    }

    uint32_t split_k = 1;
    if (ctx->device->shader_core_count != 0 && m >= pipeline->wg_denoms[0] && n >= pipeline->wg_denoms[1]) {
        // If k is 'large' and the SMs will fill less than halfway, use split_k.
        uint32_t m_tiles = CEIL_DIV(m, pipeline->wg_denoms[0]);
        uint32_t n_tiles = CEIL_DIV(n, pipeline->wg_denoms[1]);

        if (k >= 2048) {
            if (m_tiles * n_tiles <= ctx->device->shader_core_count / 2) {
                split_k = ctx->device->shader_core_count / (m_tiles * n_tiles);
            } else if (m_tiles * n_tiles <= ctx->device->shader_core_count * 2 / 3) {
                split_k = 3;
            }

            // Cap the split at 8x. Unless k is huge this is a lot of overhead.
            split_k = std::min(split_k, 8u);

            // ggml_vk_matmul will align the splits to be a multiple of 256.
            // If this rounded up size would cause the last split to be empty,
            // then reduce the split count.
            while (true) {
                if (split_k == 1) {
                    break;
                }
                uint32_t k_split = CEIL_DIV(k, split_k);
                k_split = ROUNDUP_POW2(k_split, 256);
                if (k_split * (split_k - 1) < k) {
                    break;
                }
                split_k--;
            }
        }
    }

    return split_k;
}
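// Worked example for the split_k heuristic above (illustrative numbers): with
// shader_core_count = 32, wg_denoms = {64, 64}, m = n = 128 and k = 4096,
// m_tiles * n_tiles = 4 <= 32 / 2, so split_k = 32 / 4 = 8 (within the 8x cap);
// then k_split = ROUNDUP_POW2(CEIL_DIV(4096, 8), 256) = 512 and 512 * 7 < 4096,
// so no split ends up empty and split_k stays 8.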
static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, uint32_t m, uint32_t n, bool aligned, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");

    if (ctx->device->coopmat2) {
        const uint32_t shader_core_count = ctx->device->shader_core_count;
        const uint32_t tiles_l = CEIL_DIV(m, mmp->a_l->wg_denoms[0]) * CEIL_DIV(n, mmp->a_l->wg_denoms[1]);
        const uint32_t tiles_m = CEIL_DIV(m, mmp->a_m->wg_denoms[0]) * CEIL_DIV(n, mmp->a_m->wg_denoms[1]);

        // Use large shader when the N dimension is greater than the medium shader's tile size
        uint32_t crossover_large = mmp->m->wg_denoms[1];

        // Prefer large over medium if either:
        // - medium or large tiles would overfill the GPU
        // - large tiles with split_k==3 fit in the GPU and medium tiles with split_k==2 do not
        //   (medium with split_k==2 is probably better if it fits - more workgroups running and less split_k overhead)
        bool prefer_large = tiles_m > shader_core_count || tiles_l > shader_core_count ||
                            // split_k==3 with large tiles likely better than medium tiles with no split_k.
                            (tiles_l <= shader_core_count / 3 && tiles_m > shader_core_count / 2);

        if ((ctx->device->mul_mat_l[src0_type] && (n > crossover_large && prefer_large)) || (!ctx->device->mul_mat_m[src0_type] && !ctx->device->mul_mat_s[src0_type])) {
            return aligned ? mmp->a_l : mmp->l;
        }
        // Use medium shader when the N dimension is greater than the small shader's tile size
        uint32_t crossover_medium = mmp->s->wg_denoms[1];
        if ((ctx->device->mul_mat_m[src0_type] && (n > crossover_medium)) || !ctx->device->mul_mat_s[src0_type]) {
            return aligned ? mmp->a_m : mmp->m;
        }
        return aligned ? mmp->a_s : mmp->s;
    }

    if ((ctx->device->mul_mat_s[src0_type] && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_m[src0_type] && !ctx->device->mul_mat_l[src0_type])) {
        return aligned ? mmp->a_s : mmp->s;
    }
    if ((ctx->device->mul_mat_m[src0_type] && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_l[src0_type]) {
        return aligned ? mmp->a_m : mmp->m;
    }
    return aligned ? mmp->a_l : mmp->l;

    GGML_UNUSED(src1_type);
}
static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ", " << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");
    return ggml_vk_guess_matmul_pipeline(ctx, mmp, m, n, true, src0_type, src1_type)->align;
}
static void ggml_vk_matmul(
        ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3,
        uint32_t padded_n) {
    VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ", padded_n: " << padded_n << ")");
    if (split_k == 1) {
        const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3, padded_n };
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, pc, { m, n, batch });
        return;
    }

    if (ctx->prealloc_split_k_need_sync) {
        ggml_vk_sync_buffers(ctx, subctx);
    }

    GGML_ASSERT(batch_stride_d == m * n);

    // Round the split size up to a multiple of 256 (k-quant alignment)
    uint32_t k_split = CEIL_DIV(k, split_k);
    k_split = ROUNDUP_POW2(k_split, 256);

    const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k_split, ne02, ne12, broadcast2, broadcast3, padded_n };
    // Make sure enough workgroups get assigned for split k to work
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch });
    ggml_vk_sync_buffers(ctx, subctx);
    const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2, { m * n * batch, 1, 1 });
    ctx->prealloc_split_k_need_sync = true;
}
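// Note on the split-k path above (a reading of the code, not new behavior):
// the first dispatch writes split_k partial results per output element into
// split_k_buffer, padding the m range so every split is assigned whole
// workgroups; the second dispatch (pipeline_matmul_split_k_reduce) then sums
// the split_k partials for each of the m * n * batch outputs into d.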
static vk_pipeline ggml_vk_guess_matmul_id_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, uint32_t m, uint32_t n, bool aligned, ggml_type src0_type) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ")");

    if (ctx->device->coopmat2) {
        // Use large shader when the N dimension is greater than the medium shader's tile size
        uint32_t crossover_large = mmp->m->wg_denoms[1];
        if ((ctx->device->mul_mat_id_l[src0_type] && (n > crossover_large)) || (!ctx->device->mul_mat_id_m[src0_type] && !ctx->device->mul_mat_id_s[src0_type])) {
            return aligned ? mmp->a_l : mmp->l;
        }
        // Use medium shader when the N dimension is greater than the small shader's tile size
        uint32_t crossover_medium = mmp->s->wg_denoms[1];
        if ((ctx->device->mul_mat_id_m[src0_type] && (n > crossover_medium)) || !ctx->device->mul_mat_id_s[src0_type]) {
            return aligned ? mmp->a_m : mmp->m;
        }
        return aligned ? mmp->a_s : mmp->s;
    }

    if ((ctx->device->mul_mat_id_s[src0_type] && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_id_m[src0_type] && !ctx->device->mul_mat_id_l[src0_type])) {
        return aligned ? mmp->a_s : mmp->s;
    }
    if ((ctx->device->mul_mat_id_m[src0_type] && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_id_l[src0_type]) {
        return aligned ? mmp->a_m : mmp->m;
    }
    return aligned ? mmp->a_l : mmp->l;
}

static uint32_t ggml_vk_guess_matmul_id_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, ggml_type src0_type) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline_align(" << m << ", " << n << ", " << ggml_type_name(src0_type) << ")");
    return ggml_vk_guess_matmul_id_pipeline(ctx, mmp, m, n, true, src0_type)->align;
}
static void ggml_vk_matmul_id(
        ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11,
        uint32_t padded_n) {
    VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " <<
        "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " <<
        "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " <<
        "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")");
    const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d,
                                              nei0, nei1, nbi1, ne11, padded_n };
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, pc, { m, nei1, n_as });
}
static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        (tensor->ne[3] == 1 || tensor->nb[3] == tensor->nb[2]*tensor->ne[2]);
}
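// Example: a view made with ggml_transpose() fails this test because its nb[0]
// is no longer ggml_type_size(type), whereas a view whose batch stride nb[2] is
// padded can still pass -- only dims 0/1 must be packed, plus dim 3 relative to
// dim 2.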
static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src, const ggml_tensor * dst, ggml_type to) {
    // Choose "contiguous copy" shader if src/dst are contiguous
    bool contig = ggml_is_contiguous(src) && (!dst || ggml_is_contiguous(dst));

    // Use optimized "transpose" shader if src dim1 is the innermost dimension.
    bool transpose = dst && src->nb[1] == ggml_type_size(to) && ggml_are_same_shape(dst, src);

    if (transpose && src->type == to) {
        if (ggml_type_size(to) == 4) {
            return ctx->device->pipeline_cpy_transpose_32;
        } else if (ggml_type_size(to) == 2) {
            return ctx->device->pipeline_cpy_transpose_16;
        }
    }

    if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_f32_f32;
        } else {
            return ctx->device->pipeline_cpy_f32_f32;
        }
    }
    if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_f32_f16;
        } else {
            return ctx->device->pipeline_cpy_f32_f16;
        }
    }
    if (src->type == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_f16_f16;
        } else {
            return ctx->device->pipeline_cpy_f16_f16;
        }
    }
    if (src->type == GGML_TYPE_F16 && to == GGML_TYPE_F32) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_f16_f32;
        } else {
            return ctx->device->pipeline_cpy_f16_f32;
        }
    }
    if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_BF16) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_f32_bf16;
        } else {
            return ctx->device->pipeline_cpy_f32_bf16;
        }
    }
    if (src->type == GGML_TYPE_F32 && to == GGML_TYPE_I32) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_f32_i32;
        } else {
            return ctx->device->pipeline_cpy_f32_i32;
        }
    }
    if (src->type == GGML_TYPE_I32 && to == GGML_TYPE_F32) {
        if (contig) {
            return ctx->device->pipeline_contig_cpy_i32_f32;
        } else {
            return ctx->device->pipeline_cpy_i32_f32;
        }
    }
    if (src->type == GGML_TYPE_F32) {
        switch (to) {
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
            case GGML_TYPE_IQ4_NL:
                return ctx->device->pipeline_cpy_f32_quant[to];
            default:
                break;
        }
    }
    if (to == GGML_TYPE_F32) {
        switch (src->type) {
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
            case GGML_TYPE_IQ4_NL:
                return ctx->device->pipeline_cpy_quant_f32[src->type];
            default:
                break;
        }
    }
    if (src->type == to) {
        // Copy two or four bytes at a time, depending on block size.
        // For quantized types, we scale by block size/type size. But
        // this path is also used for bf16->bf16 for example, where the
        // type size must be exactly 2 or 4.
        GGML_ASSERT(ggml_is_quantized(to) || ggml_type_size(src->type) == 2 || ggml_type_size(src->type) == 4);
        if ((ggml_type_size(src->type) % 4) == 0) {
            if (contig) {
                return ctx->device->pipeline_contig_cpy_f32_f32;
            } else {
                return ctx->device->pipeline_cpy_f32_f32;
            }
        } else {
            if (contig) {
                return ctx->device->pipeline_contig_cpy_f16_f16;
            } else {
                return ctx->device->pipeline_cpy_f16_f16;
            }
        }
    }

    std::cerr << "Missing CPY op for types: " << ggml_type_name(src->type) << " " << ggml_type_name(to) << std::endl;
    GGML_ABORT("fatal error");
}
static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, const vk_subbuffer & in, const vk_subbuffer & out) {
    VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
    std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
    const int tensor_type_size = ggml_type_size(tensor->type);

    const uint32_t ne = ggml_nelements(tensor);
    std::array<uint32_t, 3> elements;

    if (ne > 262144) {
        elements = { 512, 512, CEIL_DIV(ne, 262144) };
    } else if (ne > 512) {
        elements = { 512, CEIL_DIV(ne, 512), 1 };
    } else {
        elements = { ne, 1, 1 };
    }

    vk_op_unary_push_constants pc = {
        (uint32_t)ne,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1, (uint32_t)tensor->ne[0], (uint32_t)(tensor->ne[0] * tensor->ne[1]), (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
        0,
        0.0f, 0.0f,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    };
    init_pushconst_fastdiv(pc);
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, pc, elements);
    ggml_vk_sync_buffers(ctx, subctx);
}
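// Element-count decomposition example for the dispatch above: ne = 1,000,000
// elements exceeds 262144 (= 512 * 512), so elements becomes
// { 512, 512, CEIL_DIV(1000000, 262144) } = { 512, 512, 4 }, spreading the
// work across three grid dimensions so no single dimension grows unbounded.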
static vk_pipeline ggml_vk_get_quantize_pipeline(ggml_backend_vk_context * ctx, ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q8_1:
            return ctx->device->pipeline_quantize_q8_1_x4;
        default:
            std::cerr << "Missing quantize pipeline for type: " << ggml_type_name(type) << std::endl;
            GGML_ABORT("fatal error");
    }
}

static void ggml_vk_quantize_q8_1(ggml_backend_vk_context * ctx, vk_context& subctx, const vk_subbuffer & in, const vk_subbuffer & out, uint32_t ne) {
    VK_LOG_DEBUG("ggml_vk_quantize_q8_1(" << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ", " << ne << ")");

    vk_pipeline pipeline = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1);

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, std::array<uint32_t, 1>{ne}, { ne, 1, 1 });
    ggml_vk_sync_buffers(ctx, subctx);
}
  5628. static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool disable_split_k) {
  5629. VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << ggml_type_name(src0->type) << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  5630. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << ggml_type_name(src1->type) << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  5631. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << ggml_type_name(dst->type) << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
  5632. std::cerr << "))");
  5633. GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT
  5634. GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
  5635. const uint64_t ne00 = src0->ne[0];
  5636. const uint64_t ne01 = src0->ne[1];
  5637. const uint64_t ne02 = src0->ne[2];
  5638. const uint64_t ne03 = src0->ne[3];
  5639. const uint64_t ne10 = src1->ne[0];
  5640. const uint64_t ne11 = src1->ne[1];
  5641. const uint64_t ne12 = src1->ne[2];
  5642. const uint64_t ne13 = src1->ne[3];
  5643. const uint64_t ne21 = dst->ne[1];
  5644. const uint32_t stride_d = dst->nb[1] / ggml_type_size(dst->type);
  5645. const uint32_t stride_batch_d = stride_d*ne21;
  5646. const uint64_t r2 = ne12 / ne02;
  5647. const uint64_t r3 = ne13 / ne03;
  5648. ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
  5649. ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
  5650. ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
  5651. vk_buffer d_Qx = nullptr;
  5652. size_t qx_buf_offset = 0;
  5653. vk_buffer d_Qy = nullptr;
  5654. size_t qy_buf_offset = 0;
  5655. bool src0_uma = false;
  5656. bool src1_uma = false;
  5657. if (ctx->device->uma) {
  5658. ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
  5659. ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
  5660. src0_uma = d_Qx != nullptr;
  5661. src1_uma = d_Qy != nullptr;
  5662. }
  5663. // Reformat and convert to fp16 if non-contiguous, or for coopmat2 for better perf
  5664. const bool x_non_contig = (ctx->device->coopmat2 && src0->type == GGML_TYPE_F32) ||
  5665. !ggml_vk_dim01_contiguous(src0);
  5666. const bool y_non_contig = (ctx->device->coopmat2 && src1->type == GGML_TYPE_F32) ||
  5667. (src0->type == GGML_TYPE_BF16 && src1->type != GGML_TYPE_BF16) ||
  5668. !ggml_vk_dim01_contiguous(src1);
  5669. // If src0 is BF16, try to use a BF16 x BF16 multiply
  5670. ggml_type f16_type = src0->type == GGML_TYPE_BF16 ? GGML_TYPE_BF16 : GGML_TYPE_F16;
  5671. const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
  5672. bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && !y_non_contig && (ne11 * ne10) % 4 == 0;
  5673. // Check for mmq first
  5674. vk_matmul_pipeline mmp = quantize_y ? ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, GGML_TYPE_Q8_1, (ggml_prec)dst->op_params[0]) : nullptr;
  5675. if (mmp == nullptr) {
  5676. // Fall back to f16 dequant mul mat
  5677. mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? f16_type : src1->type, (ggml_prec)dst->op_params[0]);
  5678. quantize_y = false;
  5679. }
  5680. const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
  5681. const bool qy_needs_dequant = !quantize_y && ((src1->type != f16_type && !y_f32_kernel) || y_non_contig);
  5682. if (qx_needs_dequant) {
  5683. // Fall back to dequant + f16 mulmat
  5684. mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, f16_type, y_f32_kernel ? GGML_TYPE_F32 : f16_type, (ggml_prec)dst->op_params[0]);
  5685. }
  5686. // Not implemented
  5687. GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
  5688. const uint32_t kpad = quantize_y ? 0 : ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11, qx_needs_dequant ? f16_type : src0->type, quantize_y ? GGML_TYPE_Q8_1 : (y_f32_kernel ? GGML_TYPE_F32 : src1->type)));
  5689. const bool aligned = !quantize_y && ne10 == kpad && ne01 > 8 && ne11 > 8;
  5690. vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned, qx_needs_dequant ? f16_type : src0->type, quantize_y ? GGML_TYPE_Q8_1 : (y_f32_kernel ? GGML_TYPE_F32 : src1->type));
  5691. // Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking
  5692. uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) : ne11;
  5693. const uint64_t x_ne = ggml_nelements(src0);
  5694. // 128 elements per Q8_1 x4 block
  5695. const uint64_t y_ne = padded_n * ne10 * ne12 * ne13;
  5696. const uint64_t d_ne = ggml_nelements(dst);
    const uint32_t split_k = ggml_vk_guess_split_k(ctx, ne01, ne11, ne10, disable_split_k, pipeline);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    // 128 elements per Q8_1 x4 block
    const uint64_t y_sz = quantize_y ? (ggml_vk_align_size(y_ne, 128) * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) : (y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    vk_pipeline to_q8_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, f16_type);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, f16_type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    if (quantize_y) {
        to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1);
    }

    {
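        // Grow the preallocated scratch buffers if this node needs more space than any
        // earlier one; a single binding can never exceed maxStorageBufferRange.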
        const uint64_t split_k_size = split_k > 1 ? d_sz * split_k : 0;
        if (
            (qx_needs_dequant && x_sz > ctx->device->properties.limits.maxStorageBufferRange) ||
            (qy_needs_dequant && y_sz > ctx->device->properties.limits.maxStorageBufferRange) ||
            (split_k > 1 && split_k_size > ctx->device->properties.limits.maxStorageBufferRange)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }
        if ((qy_needs_dequant || quantize_y) && ctx->prealloc_size_y < y_sz) {
            ctx->prealloc_size_y = y_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }
        if (split_k > 1 && ctx->prealloc_size_split_k < split_k_size) {
            ctx->prealloc_size_split_k = split_k_size;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }

        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
        }
        if (quantize_y) {
            ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1);
        }
        if (split_k > 1) {
            ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, 1);
        }
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    GGML_ASSERT(d_D->size >= d_buf_offset + d_sz);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz);
    } else if (quantize_y) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= CEIL_DIV(y_sz, 144) * 144);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }
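    // prealloc_x may still be read by an earlier dispatch; barrier before overwriting it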
    if (x_non_contig || qx_needs_dequant) {
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_X, 0, x_sz } }, pc, { (uint32_t)(x_ne), 1, 1});
        ggml_vk_sync_buffers(ctx, subctx);
    }
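    // Reuse the contents of prealloc_y if the previous node already converted (or
    // quantized) this exact tensor with the same pipeline; otherwise redo the conversion.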
    if (y_non_contig) {
        if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
            ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }
    if (quantize_y) {
        if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0), y_ne);
            ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }
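    // Batch strides are in elements; inputs used in place that are not contiguous in
    // dims 0/1 take their stride from the tensor's nb instead of the packed default.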
    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }
    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant && !quantize_y) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz },
        ggml_vk_subbuffer(ctx, d_D, d_buf_offset), { ctx->prealloc_split_k, 0, d_sz * split_k },
        ne01, ne11, ne10,
        ne10, ne10, stride_d, stride_batch_x, stride_batch_y, stride_batch_d,
        split_k, ne12*ne13, ne02, ne12, r2, r3, padded_n
    ); // NOLINT

    if (x_non_contig || qx_needs_dequant) {
        ctx->prealloc_x_need_sync = true;
    }
    if (y_non_contig || quantize_y) {
        ctx->prealloc_y_need_sync = true;
    }
}

// Device tuning
static bool ggml_vk_should_use_mmvq(const vk_device& device, uint32_t m, uint32_t n, uint32_t k, ggml_type src0_type) {
    if (device->mmvq_mode == 1) {
        return true;
    } else if (device->mmvq_mode == -1) {
        return false;
    }

    // MMVQ is generally good for batches
    if (n > 1) {
        return true;
    }

    switch (device->vendor_id) {
    case VK_VENDOR_ID_NVIDIA:
        switch (src0_type) {
        case GGML_TYPE_Q8_0:
            return device->architecture == vk_device_architecture::NVIDIA_PRE_TURING;
        default:
            return true;
        }
    case VK_VENDOR_ID_AMD:
        switch (src0_type) {
        case GGML_TYPE_Q8_0:
            return device->architecture == vk_device_architecture::AMD_GCN;
        default:
            return true;
        }
    case VK_VENDOR_ID_INTEL:
        switch (src0_type) {
        // From tests on A770 Linux, may need more tuning
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q5_1:
            return false;
        default:
            return true;
        }
    default:
        return true;
    }

    GGML_UNUSED(m);
    GGML_UNUSED(k);
}

static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << ")),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    // const uint64_t ne22 = dst->ne[2];
    // const uint64_t ne23 = dst->ne[3];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    // batch_n indicates that we need to compute a few vector results, and this assumes
    // ne12 and ne13 are 1. It overloads the batch_strides to hold the row strides.
    GGML_ASSERT(ne11 == 1 || ne12 * ne13 == 1);
    bool batch_n = ne11 > 1;

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && !y_non_contig && (ne11 * ne10) % 4 == 0 && ggml_vk_should_use_mmvq(ctx->device, ne01, ne11, ne10, src0->type);

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }

    // Check for mmq first
    vk_pipeline dmmv = quantize_y ? ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, GGML_TYPE_Q8_1, ne11, ne20, ne00) : nullptr;
    vk_pipeline to_q8_1 = nullptr;

    if (dmmv == nullptr) {
        // Fall back to f16 dequant mul mat
        dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type, ne11, ne20, ne00);
        quantize_y = false;
    }

    if (quantize_y) {
        to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1);
    }

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = !quantize_y && ((src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig);

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    const uint64_t x_ne = ggml_nelements(src0);
    const uint64_t y_ne = ggml_nelements(src1);

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = quantize_y ? (ggml_vk_align_size(y_ne, 128) * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) :
                          (f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);

    {
        if (
            (qx_needs_dequant && x_sz > ctx->device->properties.limits.maxStorageBufferRange) ||
            (qy_needs_dequant && y_sz > ctx->device->properties.limits.maxStorageBufferRange)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }
        if ((qy_needs_dequant || quantize_y) && ctx->prealloc_size_y < y_sz) {
            ctx->prealloc_size_y = y_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }

        // Request descriptor sets
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
        }
        if (quantize_y) {
            ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1);
        }
        ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1);
    }

    vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
    vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
    vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1);
    vk_subbuffer d_X, d_Y;
    if (qx_needs_dequant) {
        d_X = { ctx->prealloc_x, 0, ctx->prealloc_x->size };
    } else {
        d_X = d_Qx;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant || quantize_y) {
        d_Y = { ctx->prealloc_y, 0, ctx->prealloc_y->size };
    } else {
        d_Y = d_Qy;
    }

    if (x_non_contig) {
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, d_Qx, d_X);
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, d_Qy, d_Y);
            ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }
    if (quantize_y) {
        if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_quantize_q8_1(ctx, subctx, d_Qy, d_Y, y_ne);
            ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }

    // For batch_n, the A matrix is the same for each batch, and B/D use the row stride as the batch stride
    uint32_t stride_batch_x = batch_n ? 0 : ne00*ne01;
    uint32_t stride_batch_y = batch_n ? ne10 : (ne10*ne11);
    uint32_t stride_batch_d = batch_n ? ne20 : (ne20*ne21);

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }
    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }
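    // If the row count exceeds the device's X workgroup limit, spread the rows across
    // the Z dimension as well.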
    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x = CEIL_DIV(groups_x, groups_z);
    }
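    // When bias adds were fused into this node, bind the bias tensors as the extra
    // F0/F1 inputs; unused slots alias d_D.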
    uint32_t fusion_flags = 0;
    vk_subbuffer d_F0 = d_D;
    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
        d_F0 = ggml_vk_tensor_subbuffer(ctx, bias);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0;
    }
    vk_subbuffer d_F1 = d_D;
    if (ctx->num_additional_fused_ops == 2) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 2];
        const ggml_tensor * bias = add->src[0] == cgraph->nodes[node_idx + 1] ? add->src[1] : add->src[0];
        d_F1 = ggml_vk_tensor_subbuffer(ctx, bias);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS1;
    }

    // compute
    const vk_mat_vec_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        stride_batch_x, stride_batch_y, stride_batch_d,
        fusion_flags,
        (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3,
    };
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        {
            d_X,
            d_Y,
            d_D,
            d_F0,
            d_F1,
        },
        pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });

    if (x_non_contig) {
        ctx->prealloc_x_need_sync = true;
    }
    if (y_non_contig || quantize_y) {
        ctx->prealloc_y_need_sync = true;
    }
}

static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    VK_LOG_DEBUG("ggml_vk_mul_mat_p021_f16_f32(" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "))");
    GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
    GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
    GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    //const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    // With grouped query attention there are > 1 Q matrices per K, V matrix.
    uint32_t gqa_ratio = (uint32_t)ne12 / (uint32_t)ne02;
    if (gqa_ratio > 8 || gqa_ratio == 0 || ne12 != ne02 * gqa_ratio) {
        gqa_ratio = 1;
    }

    {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1);
    }

    vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true);
    vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
    vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1, true);

    vk_subbuffer d_F0 = d_D;
    uint32_t fusion_flags = 0;
    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
        d_F0 = ggml_vk_tensor_subbuffer(ctx, bias);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0;
    }
    vk_subbuffer d_F1 = d_D;
    if (ctx->num_additional_fused_ops > 1) {
        const ggml_tensor * bias = cgraph->nodes[node_idx + 2]->src[1];
        d_F1 = ggml_vk_tensor_subbuffer(ctx, bias);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS1;
    }

    // compute
    vk_mat_vec_p021_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12,
        0, 0, fusion_flags
    };
    init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);

    uint32_t workgroups_z = (uint32_t)ne12;
    // When gqa_ratio > 1, each invocation does multiple rows and we can launch fewer workgroups
    if (gqa_ratio > 1) {
        workgroups_z /= gqa_ratio;
    }

    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1],
        {
            d_Qx,
            d_Qy,
            d_D,
            d_F0,
            d_F1,
        }, pc, { 1, (uint32_t)ne01, workgroups_z });
}

static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    VK_LOG_DEBUG("ggml_vk_mul_mat_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "))");
    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];
    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];
    const uint64_t nb12 = src1->nb[2];

    // const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];
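    // Convert the remaining byte strides to element strides for the shader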
    const uint32_t nb03 = (uint32_t)(src0->nb[3] / sizeof(ggml_fp16_t));
    const uint32_t nb13 = (uint32_t)(src1->nb[3] / sizeof(float));
    const uint32_t nb23 = (uint32_t)(dst->nb[3] / sizeof(float));

    GGML_ASSERT(ne11 == 1);
    GGML_ASSERT(src0->ne[3] == src1->ne[3]); // checked in supports_op

    const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_y = nb12 / sizeof(float);

    {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);
    }

    vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true);
    vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
    vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1, true);

    vk_subbuffer d_F0 = d_D;
    uint32_t fusion_flags = 0;
    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * add = cgraph->nodes[node_idx + 1];
        const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
        d_F0 = ggml_vk_tensor_subbuffer(ctx, bias);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0;
    }
    vk_subbuffer d_F1 = d_D;
    if (ctx->num_additional_fused_ops > 1) {
        const ggml_tensor * bias = cgraph->nodes[node_idx + 2]->src[1];
        d_F1 = ggml_vk_tensor_subbuffer(ctx, bias);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS1;
    }

    // compute
    vk_mat_vec_nc_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne01,
        row_stride_x, channel_stride_x, channel_stride_y,
        (uint32_t)(ne12 / ne02), (uint32_t)ne12,
        0, 0,
        nb03, nb13, nb23, fusion_flags
    };
    init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
        {
            d_Qx,
            d_Qy,
            d_D,
            d_F0,
            d_F1,
        }, pc, { (uint32_t)ne03, (uint32_t)ne01, (uint32_t)ne12 });
}

static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    VK_LOG_DEBUG("ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")");

    // Handle huge A matrix by splitting the M dimension. This works well for convolution use cases
    // where the M dimension is very large.
    // Split_k doesn't work with M splitting.
    const size_t nbytes = ggml_nbytes(src0);
    const bool needs_split = nbytes > ctx->device->properties.limits.maxStorageBufferRange;
    if (needs_split) {
        // Choose the number of rows that can fit (and divide by two, to allow for any additional offsets)
        const uint32_t M_split = ctx->device->properties.limits.maxStorageBufferRange / (2 * src0->nb[1]);
        uint32_t m_offset = 0;
        while (m_offset < dst->ne[0]) {
            const uint32_t cur_M_size = std::min(M_split, (uint32_t)(dst->ne[0] - m_offset));
            ggml_tensor dst2 = *dst;
            ggml_tensor src02 = *src0;

            dst2.view_src = dst->view_src ? dst->view_src : dst;
            src02.view_src = src0->view_src ? src0->view_src : src0;

            dst2.view_offs += m_offset * dst->nb[0];
            src02.view_offs += m_offset * src0->nb[1];

            dst2.ne[0] = cur_M_size;
            src02.ne[1] = cur_M_size;

            ggml_vk_mul_mat_q_f16(ctx, subctx, &src02, src1, &dst2, true);

            m_offset += cur_M_size;
        }
    } else if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && dst->ne[1] == 1 &&
        // detect 0213 permutation, and batch size of 1
        src0->nb[0] <= src0->nb[2] &&
        src0->nb[2] <= src0->nb[1] &&
        src0->nb[1] <= src0->nb[3] &&
        src1->nb[0] <= src1->nb[2] &&
        src1->nb[2] <= src1->nb[1] &&
        src1->nb[1] <= src1->nb[3] &&
        src0->ne[3] == 1 &&
        src1->ne[3] == 1) {
        ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, cgraph, node_idx);
    } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1 &&
               !ggml_is_permuted(src0) && !ggml_is_permuted(src1)) {
        ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, cgraph, node_idx);
        // mul_mat_vec supports batching ne12*ne13 when ne11==1, or treating ne11 as the batch size (up to four)
        // when ne12 and ne13 are one.
    } else if ((dst->ne[1] == 1 || (dst->ne[1] <= mul_mat_vec_max_cols && src1->ne[2] * src1->ne[3] == 1)) &&
               (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_q_f16(ctx, subctx, cgraph, node_idx);
    } else {
        ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, false);
    }
}

static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];

    const uint32_t nbi1 = ids->nb[1];
    const uint32_t nbi2 = ids->nb[2];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    // const uint64_t ne22 = dst->ne[2];
    // const uint64_t ne23 = dst->ne[3];

    const uint64_t n_as = ne02;

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
    ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;

    vk_buffer d_Qx = nullptr;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids = nullptr;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    // Reformat and convert to fp16 if non-contiguous, or for coopmat2 for better perf
    const bool x_non_contig = (ctx->device->coopmat2 && src0->type == GGML_TYPE_F32) ||
                              !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = (ctx->device->coopmat2 && src1->type == GGML_TYPE_F32) ||
                              (src0->type == GGML_TYPE_BF16 && src1->type != GGML_TYPE_BF16) ||
                              !ggml_vk_dim01_contiguous(src1);

    // If src0 is BF16, try to use a BF16 x BF16 multiply
    ggml_type f16_type = src0->type == GGML_TYPE_BF16 ? GGML_TYPE_BF16 : GGML_TYPE_F16;

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && !y_non_contig && (ne11 * ne10) % 4 == 0;

    // Check for mmq first
    vk_matmul_pipeline mmp = quantize_y ? ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, GGML_TYPE_Q8_1, (ggml_prec)dst->op_params[0]) : nullptr;

    if (mmp == nullptr) {
        // Fall back to f16 dequant mul mat
        mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? f16_type : src1->type, (ggml_prec)dst->op_params[0]);
        quantize_y = false;
    }

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = !quantize_y && ((src1->type != f16_type && !y_f32_kernel) || y_non_contig);

    if (qx_needs_dequant) {
        // Fall back to dequant + f16 mulmat
        mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, f16_type, y_f32_kernel ? GGML_TYPE_F32 : f16_type, (ggml_prec)dst->op_params[0]);
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint32_t kpad = quantize_y ? 0 : ggml_vk_align_size(ne10, ggml_vk_guess_matmul_id_pipeline_align(ctx, mmp, ne01, nei1, qx_needs_dequant ? f16_type : src0->type));
    const bool aligned = !quantize_y && ne10 == kpad && ne01 > 8 && nei1 > 8;

    vk_pipeline pipeline = ggml_vk_guess_matmul_id_pipeline(ctx, mmp, ne01, nei1, aligned, qx_needs_dequant ? f16_type : src0->type);

    // Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking
    uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) : ne11;

    const uint64_t x_ne = ggml_nelements(src0);
    const uint64_t y_ne = padded_n * ne10 * ne12 * ne13;
    const uint64_t d_ne = ggml_nelements(dst);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = quantize_y ? (ggml_vk_align_size(y_ne, 128) * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) : (y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    vk_pipeline to_q8_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, f16_type);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, f16_type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    if (quantize_y) {
        to_q8_1 = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1);
    }

    {
        if (
            (qx_needs_dequant && x_sz > ctx->device->properties.limits.maxStorageBufferRange) ||
            (qy_needs_dequant && y_sz > ctx->device->properties.limits.maxStorageBufferRange)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }
        if ((qy_needs_dequant || quantize_y) && ctx->prealloc_size_y < y_sz) {
            ctx->prealloc_size_y = y_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }

        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
        }
        if (quantize_y) {
            ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1);
        }
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = ids_buf_ctx->dev_buffer;
        ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz);
    } else if (quantize_y) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= CEIL_DIV(y_sz, 144) * 144);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig || qx_needs_dequant) {
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0,
            { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_X, 0, x_sz } }, pc, { (uint32_t)x_ne, 1, 1});
        ggml_vk_sync_buffers(ctx, subctx);
    }
    if (y_non_contig) {
        if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
            ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }
    if (quantize_y) {
        if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0), y_ne);
            ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }
    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant && !quantize_y) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul_id(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz },
        { d_D, d_buf_offset, d_sz }, { d_ids, ids_buf_offset, ids_sz },
        ne01, ne21, ne10, ne10, ne10, ne01,
        stride_batch_x, stride_batch_y, ne20*ne21,
        n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11, padded_n
    ); // NOLINT

    if (x_non_contig || qx_needs_dequant) {
        ctx->prealloc_x_need_sync = true;
    }
    if (y_non_contig) {
        ctx->prealloc_y_need_sync = true;
    }
}

static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    ggml_tensor * ids = dst->src[2];
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "))");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    // const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    // const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];
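    // This path only handles a single expert-selection row
    // (guaranteed by ggml_vk_use_mul_mat_vec_id)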
    GGML_ASSERT(nei1 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    // const uint64_t ne22 = dst->ne[2];
    // const uint64_t ne23 = dst->ne[3];

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ggml_nelements(src0);
    const uint64_t y_ne = ggml_nelements(src1);

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0, nullptr, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1, nullptr, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec_id(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    {
        if (
            (qx_needs_dequant && x_sz > ctx->device->properties.limits.maxStorageBufferRange) ||
            (qy_needs_dequant && y_sz > ctx->device->properties.limits.maxStorageBufferRange)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }
        if (qy_needs_dequant && ctx->prealloc_size_y < y_sz) {
            ctx->prealloc_size_y = y_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }

        // Request descriptor sets
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1);
        }
        ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1);
    }

    vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
    vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
    vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1);
    vk_subbuffer d_ids = ggml_vk_tensor_subbuffer(ctx, ids);
    vk_subbuffer d_F0 = d_D;
    vk_subbuffer d_X, d_Y;
    if (qx_needs_dequant) {
        d_X = { ctx->prealloc_x, 0, ctx->prealloc_x->size };
    } else {
        d_X = d_Qx;
    }
    if (qy_needs_dequant) {
        d_Y = { ctx->prealloc_y, 0, ctx->prealloc_y->size };
    } else {
        d_Y = d_Qy;
    }

    if (x_non_contig) {
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
    }

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, d_Qx, d_X);
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        if (ctx->prealloc_y_last_pipeline_used != to_fp16_vk_1.get() ||
            ctx->prealloc_y_last_tensor_used != src1) {
            if (ctx->prealloc_y_need_sync) {
                ggml_vk_sync_buffers(ctx, subctx);
            }
            ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, d_Qy, d_Y);
            ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
            ctx->prealloc_y_last_tensor_used = src1;
        }
    }

    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x = CEIL_DIV(groups_x, groups_z);
    }
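    // Fused follow-up ops: a GGML_OP_MUL becomes a scale input, a GGML_OP_ADD_ID a bias input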
    uint32_t fusion_flags = 0;
    if (ctx->num_additional_fused_ops > 0) {
        const ggml_tensor * bias = cgraph->nodes[node_idx + 1]->src[1];
        d_F0 = ggml_vk_tensor_subbuffer(ctx, bias);
        if (cgraph->nodes[node_idx + 1]->op == GGML_OP_MUL) {
            fusion_flags |= MAT_VEC_FUSION_FLAGS_SCALE0;
        } else {
            GGML_ASSERT(cgraph->nodes[node_idx + 1]->op == GGML_OP_ADD_ID);
            fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0;
        }
    }
    vk_subbuffer d_F1 = d_D;
    if (ctx->num_additional_fused_ops > 1) {
        const ggml_tensor * scale = cgraph->nodes[node_idx + 2]->src[1];
        d_F1 = ggml_vk_tensor_subbuffer(ctx, scale);
        fusion_flags |= MAT_VEC_FUSION_FLAGS_SCALE1;
    }

    // compute
    const vk_mat_vec_id_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        (uint32_t)(ne00 * ne01), stride_batch_y, (uint32_t)(ne20 * ne21),
        fusion_flags,
        (uint32_t)nei0, (uint32_t)ne11,
    };
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        {
            d_X,
            d_Y,
            d_D,
            d_F0,
            d_F1,
            d_ids,
        },
        pc, { groups_x, (uint32_t)nei0, groups_z });

    if (x_non_contig) {
        ctx->prealloc_x_need_sync = true;
    }
    if (y_non_contig) {
        ctx->prealloc_y_need_sync = true;
    }
}

static bool ggml_vk_use_mul_mat_vec_id(const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src2 = dst->src[2];
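    // Use the matrix-vector path only for single-token expert selection (ids->ne[1] == 1)
    // and weight types that have a mat-vec kernel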
    return src2->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type));
}

static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    ggml_tensor * src0 = dst->src[0];
    ggml_tensor * src1 = dst->src[1];
    ggml_tensor * src2 = dst->src[2];
    VK_LOG_DEBUG("ggml_vk_mul_mat_id(" << src0 << ", " << src1 << ", " << src2 << ", " << dst << ")");
    if (ggml_vk_use_mul_mat_vec_id(cgraph, node_idx)) {
        ggml_vk_mul_mat_vec_id_q_f16(ctx, subctx, cgraph, node_idx);
    } else {
        ggml_vk_mul_mat_id_q_f16(ctx, subctx, src0, src1, src2, dst);
    }
}

static bool ggml_vk_flash_attn_scalar_shmem_support(const vk_device& device, const uint32_t hsk, uint32_t hsv) {
    // Needs to be kept up to date on shader changes
    GGML_UNUSED(hsv);
    const uint32_t wg_size = scalar_flash_attention_workgroup_size;
    const uint32_t Br = get_fa_scalar_num_large_rows(hsk, hsv);
    const uint32_t Bc = scalar_flash_attention_Bc;

    const uint32_t tmpsh = wg_size * sizeof(float);
    const uint32_t tmpshv4 = wg_size * 4 * sizeof(float);

    const uint32_t masksh = Bc * Br * sizeof(float);

    const uint32_t Qf = Br * (hsk / 4 + 2) * 4 * sizeof(float);

    const uint32_t total_size = tmpsh + tmpshv4 + masksh + Qf;
    const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;

    VK_LOG_DEBUG("ggml_vk_flash_attn_scalar_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", total_size=" << total_size << ", supported=" << supported);

    return supported;
}

static bool ggml_vk_flash_attn_coopmat_shmem_support(const vk_device& device, const uint32_t hsk, uint32_t hsv, bool f32acc) {
    // Needs to be kept up to date on shader changes
    GGML_UNUSED(hsv);
    const uint32_t wg_size = scalar_flash_attention_workgroup_size;
    const uint32_t Br = coopmat1_flash_attention_num_large_rows;
    const uint32_t Bc = scalar_flash_attention_Bc;
    const uint32_t hsk_pad = ROUNDUP_POW2(hsk, 16);

    const uint32_t acctype = f32acc ? 4 : 2;
    const uint32_t f16vec4 = 8;

    const uint32_t tmpsh = wg_size * sizeof(float);
    const uint32_t tmpshv4 = wg_size * 4 * acctype;

    const uint32_t qstride = hsk_pad / 4 + 2;
    const uint32_t Qf = Br * qstride * f16vec4;

    const uint32_t sfshstride = (hsk <= 128) ? (Br + 8) : Br;
    const uint32_t sfsh = Bc * sfshstride * acctype;

    const uint32_t kshstride = hsk_pad / 4 + 2;
    const uint32_t ksh = Bc * kshstride * f16vec4;

    const uint32_t slope = Br * sizeof(float);

    const uint32_t total_size = tmpsh + tmpshv4 + Qf + sfsh + ksh + slope;
    const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;

    VK_LOG_DEBUG("ggml_vk_flash_attn_coopmat_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", f32acc=" << f32acc << ", total_size=" << total_size << ", supported=" << supported);

    return supported;
}

static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * q, const ggml_tensor * k, const ggml_tensor * v, const ggml_tensor * mask, const ggml_tensor * sinks, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_flash_attn((" << q << ", name=" << q->name << ", type=" << q->type << ", ne0=" << q->ne[0] << ", ne1=" << q->ne[1] << ", ne2=" << q->ne[2] << ", ne3=" << q->ne[3] << ", nb0=" << q->nb[0] << ", nb1=" << q->nb[1] << ", nb2=" << q->nb[2] << ", nb3=" << q->nb[3];
    std::cerr << "), (" << k << ", name=" << k->name << ", type=" << k->type << ", ne0=" << k->ne[0] << ", ne1=" << k->ne[1] << ", ne2=" << k->ne[2] << ", ne3=" << k->ne[3] << ", nb0=" << k->nb[0] << ", nb1=" << k->nb[1] << ", nb2=" << k->nb[2] << ", nb3=" << k->nb[3];
    std::cerr << "), (" << v << ", name=" << v->name << ", type=" << v->type << ", ne0=" << v->ne[0] << ", ne1=" << v->ne[1] << ", ne2=" << v->ne[2] << ", ne3=" << v->ne[3] << ", nb0=" << v->nb[0] << ", nb1=" << v->nb[1] << ", nb2=" << v->nb[2] << ", nb3=" << v->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    if (sinks) {
        std::cerr << "), (" << sinks << ", name=" << sinks->name << ", type=" << sinks->type << ", ne0=" << sinks->ne[0] << ", ne1=" << sinks->ne[1] << ", ne2=" << sinks->ne[2] << ", ne3=" << sinks->ne[3] << ", nb0=" << sinks->nb[0] << ", nb1=" << sinks->nb[1] << ", nb2=" << sinks->nb[2] << ", nb3=" << sinks->nb[3];
    }
    std::cerr << "))");

    GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
    GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
    GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
    GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
    GGML_TENSOR_LOCALS(size_t, nb, dst, nb)

    const uint32_t nem1 = mask ? mask->ne[1] : 0;
    const uint32_t nem2 = mask ? mask->ne[2] : 0;
    const uint32_t nem3 = mask ? mask->ne[3] : 0;

    const uint32_t HSK = nek0;
    const uint32_t HSV = nev0;
    uint32_t N = neq1;
    const uint32_t KV = nek1;

    GGML_ASSERT(ne0 == HSV);
    GGML_ASSERT(ne2 == N);

    // input tensor rows must be contiguous
    GGML_ASSERT(nbq0 == ggml_type_size(q->type));
    GGML_ASSERT(nbk0 == ggml_type_size(k->type));
    GGML_ASSERT(nbv0 == ggml_type_size(v->type));

    GGML_ASSERT(neq0 == HSK);
    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nev1 == nek1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    assert(dst->type == GGML_TYPE_F32);
    assert(q->type == GGML_TYPE_F32);
    assert(k->type == v->type);

    FaCodePath path = ctx->device->coopmat2 ? FA_COOPMAT2 :
                      ctx->device->coopmat1_fa_support ? FA_COOPMAT1 : FA_SCALAR;

    if (path == FA_COOPMAT1) {
        const bool coopmat_shape_supported = (dst->op_params[3] == GGML_PREC_F32 && ctx->device->coopmat_support_16x16x16_f32acc) ||
                                             (dst->op_params[3] != GGML_PREC_F32 && ctx->device->coopmat_support_16x16x16_f16acc);

        const bool coopmat_shmem_supported = ggml_vk_flash_attn_coopmat_shmem_support(ctx->device, HSK, HSV, dst->op_params[3] == GGML_PREC_F32);

        if (!coopmat_shape_supported || !coopmat_shmem_supported) {
            path = FA_SCALAR;
        }
    }

    uint32_t gqa_ratio = 1;
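    // Ratio of Q heads to K/V heads; > 1 indicates grouped-query attention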
    uint32_t qk_ratio = neq2 / nek2;
    uint32_t workgroups_x = (uint32_t)neq1;
    uint32_t workgroups_y = (uint32_t)neq2;
    uint32_t workgroups_z = (uint32_t)neq3;

    // For scalar/coopmat1 FA, we can use the "large" size to accommodate gqa.
    // For coopmat2 FA, we always use the small size (which is still pretty large for gqa).
    uint32_t max_gqa;
    switch (path) {
    case FA_SCALAR:
    case FA_COOPMAT1:
        // We may switch from coopmat1 to scalar, so use the scalar limit for both
        max_gqa = get_fa_scalar_num_large_rows(HSK, HSV);
        break;
    case FA_COOPMAT2:
        max_gqa = get_fa_num_small_rows(FA_COOPMAT2);
        break;
    default:
        GGML_ASSERT(0);
    }

    if (N == 1 && qk_ratio > 1 && qk_ratio <= max_gqa &&
        qk_ratio * nek2 == neq2 && nek2 == nev2 && nem2 <= 1) {
        // grouped query attention - make the N dimension equal to gqa_ratio, reduce
        // workgroups proportionally in y dimension. The shader will detect gqa_ratio > 1
        // and change addressing calculations to index Q's dimension 2.
        gqa_ratio = qk_ratio;
        N = gqa_ratio;
        workgroups_y /= N;
    }

    bool small_rows = N <= get_fa_num_small_rows(path);

    // coopmat1 does not actually support "small rows" (it needs 16 rows).
    // So use scalar instead.
    if (small_rows && path == FA_COOPMAT1) {
        path = FA_SCALAR;
    }

    // scalar is faster than coopmat2 when N==1
    if (N == 1 && path == FA_COOPMAT2) {
        path = FA_SCALAR;
    }

    // with large hsk/hsv, scalar path may need to use small_rows to fit in shared memory
    if (path == FA_SCALAR &&
        !ggml_vk_flash_attn_scalar_shmem_support(ctx->device, HSK, HSV)) {
        small_rows = true;
    }

    const uint32_t q_stride = (uint32_t)(nbq1 / ggml_type_size(q->type));
    uint32_t k_stride = (uint32_t)(nbk1 / ggml_type_size(k->type));
    uint32_t v_stride = (uint32_t)(nbv1 / ggml_type_size(v->type));

    // For F32, the shader treats it as a block of size 4 (for vec4 loads)
    if (k->type == GGML_TYPE_F32) {
        k_stride /= 4;
    }
    if (v->type == GGML_TYPE_F32) {
        v_stride /= 4;
    }

    uint32_t alignment = fa_align(path, HSK, HSV, k->type, small_rows);
    bool aligned = (KV % alignment) == 0 &&
                   // the "aligned" shader variant will forcibly align strides, for performance
                   (q_stride & 7) == 0 && (k_stride & 7) == 0 && (v_stride & 7) == 0;

    // Need to use the coopmat2 variant that clamps loads when HSK/HSV aren't sufficiently aligned.
    if (((HSK | HSV) % 16) != 0 && path == FA_COOPMAT2) {
        aligned = false;
    }

    bool f32acc = path == FA_SCALAR || dst->op_params[3] == GGML_PREC_F32;

    vk_fa_pipeline_state fa_pipeline_state(HSK, HSV, small_rows, path, aligned, f32acc);

    vk_pipeline pipeline = nullptr;

    {
        std::lock_guard<std::recursive_mutex> guard(ctx->device->mutex);
        auto &pipelines = ctx->device->pipeline_flash_attn_f32_f16[k->type];
        auto it = pipelines.find(fa_pipeline_state);
        if (it != pipelines.end()) {
            pipeline = it->second;
        } else {
            pipelines[fa_pipeline_state] = pipeline = std::make_shared<vk_pipeline_struct>();
        }
    }
    assert(pipeline);

    uint32_t split_kv = KV;
    uint32_t split_k = 1;

    // Use a placeholder core count if one isn't available. split_k is a big help for perf.
    const uint32_t shader_core_count = ctx->device->shader_core_count ? ctx->device->shader_core_count : 16;

    // Try to use split_k when KV is large enough to be worth the overhead
  6855. if (workgroups_x == 1 && shader_core_count > 0) {
  6856. // Try to run two workgroups per SM.
  6857. split_k = shader_core_count * 2 / (workgroups_y * workgroups_z);
  6858. if (split_k > 1) {
  6859. // Try to evenly split KV into split_k chunks, but it needs to be a multiple
  6860. // of "align", so recompute split_k based on that.
  6861. split_kv = ROUNDUP_POW2(std::max(1u, KV / split_k), alignment);
  6862. split_k = CEIL_DIV(KV, split_kv);
  6863. workgroups_x = split_k;
  6864. }
  6865. }
  6866. // Reserve space for split_k temporaries. For each split x batch, we need to store the O matrix (D x ne1)
  6867. // and the per-row m and L values (ne1 rows). We store all the matrices first, followed by the rows.
  6868. const uint64_t split_k_size = split_k > 1 ? (HSV * ne1 * sizeof(float) + ne1 * sizeof(float) * 2) * split_k * ne3 : 0;
  6869. if (split_k_size > ctx->device->properties.limits.maxStorageBufferRange) {
  6870. GGML_ABORT("Requested preallocation size is too large");
  6871. }
  6872. if (ctx->prealloc_size_split_k < split_k_size) {
  6873. ctx->prealloc_size_split_k = split_k_size;
  6874. ggml_vk_preallocate_buffers(ctx, subctx);
  6875. }
  6876. {
  6877. // Request descriptor sets
  6878. ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
  6879. if (split_k > 1) {
  6880. ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_flash_attn_split_k_reduce, 1);
  6881. }
  6882. }
  6883. float scale = 1.0f;
  6884. float max_bias = 0.0f;
  6885. float logit_softcap = 0.0f;
  6886. memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float));
  6887. memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));
  6888. memcpy(&logit_softcap, (const float *) dst->op_params + 2, sizeof(float));
  6889. if (logit_softcap != 0) {
  6890. scale /= logit_softcap;
  6891. }
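    // ALiBi slope bases: the first n_head_log2 heads use powers of m0, the remainder
    // odd powers of m1 (the usual ALiBi slope scheme used elsewhere in ggml).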
    const uint32_t n_head_kv = neq2;
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    vk_subbuffer q_buf = ggml_vk_tensor_subbuffer(ctx, q);
    vk_subbuffer k_buf = ggml_vk_tensor_subbuffer(ctx, k);
    vk_subbuffer v_buf = ggml_vk_tensor_subbuffer(ctx, v);
    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst);
    vk_subbuffer mask_buf = mask ? ggml_vk_tensor_subbuffer(ctx, mask) : q_buf;
    vk_subbuffer sinks_buf = sinks ? ggml_vk_tensor_subbuffer(ctx, sinks) : q_buf;

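    // Pack presence flags together with n_head_log2: bit 24 = sinks present,
    // bit 16 = mask present, low bits = n_head_log2.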
    uint32_t mask_n_head_log2 = ((sinks != nullptr) << 24) | ((mask != nullptr) << 16) | n_head_log2;

    const vk_flash_attn_push_constants pc = { N, KV,
                                              (uint32_t)ne1, (uint32_t)ne2, (uint32_t)ne3,
                                              (uint32_t)neq2, (uint32_t)neq3,
                                              (uint32_t)nek2, (uint32_t)nek3,
                                              (uint32_t)nev2, (uint32_t)nev3,
                                              nem1, nem2, nem3,
                                              q_stride, (uint32_t)nbq2, (uint32_t)nbq3,
                                              k_stride, (uint32_t)nbk2, (uint32_t)nbk3,
                                              v_stride, (uint32_t)nbv2, (uint32_t)nbv3,
                                              scale, max_bias, logit_softcap,
                                              mask_n_head_log2, m0, m1,
                                              gqa_ratio, split_kv, split_k };

    if (split_k > 1) {
        if (ctx->prealloc_split_k_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }

        vk_subbuffer split_k_buf = ggml_vk_subbuffer(ctx, ctx->prealloc_split_k, 0);

        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
                                  {q_buf, k_buf, v_buf, mask_buf, sinks_buf, split_k_buf},
                                  // We only use split_k when group query attention is enabled, which means
                                  // there's no more than one tile of rows (i.e. workgroups_x would have been
                                  // one). We reuse workgroups_x to mean the number of splits, so we need to
                                  // cancel out the divide by wg_denoms[0].
                                  pc, { workgroups_x * pipeline->wg_denoms[0], workgroups_y, workgroups_z });

        ggml_vk_sync_buffers(ctx, subctx);

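        // Second pass: reduce the per-split O, m and L partials into dst.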
        const std::array<uint32_t, 5> pc2 = { HSV, (uint32_t)ne1, (uint32_t)ne3, split_k, (sinks != nullptr) };
        ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_flash_attn_split_k_reduce,
                                  {split_k_buf, sinks_buf, dst_buf},
                                  pc2, { (uint32_t)ne1, HSV, (uint32_t)ne3 });
        ctx->prealloc_split_k_need_sync = true;
    } else {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
                                  {q_buf, k_buf, v_buf, mask_buf, sinks_buf, dst_buf},
                                  pc, { workgroups_x, workgroups_y, workgroups_z });
    }
}

static std::array<uint32_t, 3> ggml_vk_get_conv_elements(const ggml_tensor *dst) {
    const ggml_tensor *src0 = dst->src[0];
    const ggml_tensor *src1 = dst->src[1];

    // src0 - kernel: [KW, KH, Cin, Cout]
    // src1 - input:  [W, H, Cin, N]
    // dst  - result: [OW, OH, Cout, N]

    // Copied from ggml.c: int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d)
    auto calc_conv_output_size = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t {
        return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
    };
    // parallelize in {OW/BS_K, OH/BS_NPQ, 1}
    int64_t W = src1->ne[0];
    int64_t H = src1->ne[1];
    int64_t KW = src0->ne[0];
    int64_t KH = src0->ne[1];
    int64_t Cout = src0->ne[3];
    int64_t N = src1->ne[3];
    int64_t OH = calc_conv_output_size(H, KH, dst->op_params[1], dst->op_params[3], dst->op_params[5]);
    int64_t OW = calc_conv_output_size(W, KW, dst->op_params[0], dst->op_params[2], dst->op_params[4]);
    int64_t NPQ = N * OW * OH;

    // Tile output matrix to (K/NB_K, NPQ/NB_NPQ, 1) workgroups
    std::array<uint32_t, 3> elements = { static_cast<uint32_t>(Cout), static_cast<uint32_t>(NPQ), 1 };
    return elements;
}

static std::array<uint32_t, 3> ggml_vk_get_conv_transpose_2d_elements(const ggml_tensor *dst) {
    const ggml_tensor *src0 = dst->src[0];
    const ggml_tensor *src1 = dst->src[1];

    // src0 - kernel: [KW, KH, Cout, Cin]
    // src1 - input:  [W, H, Cin, N]
    // dst  - result: [OW, OH, Cout, N]

    auto calc_conv_output_size = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t {
        return (ins - 1) * s - 2 * p + (ks - 1) * d + 1;
    };
    // parallelize in {OW/BS_K, OH/BS_NPQ, 1}
    int64_t W = src1->ne[0];
    int64_t H = src1->ne[1];
    int64_t KW = src0->ne[0];
    int64_t KH = src0->ne[1];
    int64_t Cout = src0->ne[2];
    int64_t N = src1->ne[3];
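    // conv_transpose_2d only carries a stride in op_params[0]; padding and dilation
    // are fixed at 0 and 1 here.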
    int64_t OH = calc_conv_output_size(H, KH, dst->op_params[0], 0, 1);
    int64_t OW = calc_conv_output_size(W, KW, dst->op_params[0], 0, 1);
    int64_t NPQ = N * OW * OH;

    // Tile output matrix to (K/NB_K, NPQ/NB_NPQ, 1) workgroups
    std::array<uint32_t, 3> elements = { static_cast<uint32_t>(Cout), static_cast<uint32_t>(NPQ), 1 };
    return elements;
}

static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * dst, ggml_op op) {
    switch (op) {
    case GGML_OP_GET_ROWS:
        GGML_ASSERT(src1->type == GGML_TYPE_I32);
        if (dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_get_rows[src0->type];
        }
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_get_rows_f32[src0->type];
        }
        return nullptr;
    case GGML_OP_ACC:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_acc_f32;
        }
        return nullptr;
    case GGML_OP_ADD:
    case GGML_OP_SUB:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
        if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) ||
            (src1->type != GGML_TYPE_F32 && src1->type != GGML_TYPE_F16) ||
            (dst->type != GGML_TYPE_F32 && dst->type != GGML_TYPE_F16)) {
            return nullptr;
        }
        switch (op) {
        case GGML_OP_ADD:
        {
            if (ctx->num_additional_fused_ops > 0) {
                if (ctx->do_add_rms_partials) {
                    return ctx->device->pipeline_multi_add_rms[ctx->num_additional_fused_ops];
                } else {
                    return ctx->device->pipeline_multi_add[ctx->num_additional_fused_ops];
                }
            }
            if (ctx->do_add_rms_partials) {
                auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_add_rms_norepeat : ctx->device->pipeline_add_rms;
                return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
            } else {
                auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_add_norepeat : ctx->device->pipeline_add;
                return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
            }
        }
        case GGML_OP_SUB:
        {
            auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_sub_norepeat : ctx->device->pipeline_sub;
            return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
        }
        case GGML_OP_MUL:
        {
            auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_mul_norepeat : ctx->device->pipeline_mul;
            return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
        }
        case GGML_OP_DIV:
        {
            auto pipelines = ggml_are_same_shape(src0, src1) ? ctx->device->pipeline_div_norepeat : ctx->device->pipeline_div;
            return pipelines[src0->type == GGML_TYPE_F16][src1->type == GGML_TYPE_F16][dst->type == GGML_TYPE_F16];
        }
        default:
            break;
        }
        return nullptr;
    case GGML_OP_ADD_ID:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && src2->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_add_id_f32;
        }
        return nullptr;
    case GGML_OP_CONCAT:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_concat_f32;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_concat_f16;
        }
        if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_concat_i32;
        }
        return nullptr;
    case GGML_OP_UPSCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            ggml_scale_mode mode = (ggml_scale_mode)(ggml_get_op_params_i32(dst, 0) & 0xFF);
            switch (mode) {
            case GGML_SCALE_MODE_NEAREST:
                return ctx->device->pipeline_upscale_nearest_f32;
            case GGML_SCALE_MODE_BILINEAR:
                return ctx->device->pipeline_upscale_bilinear_f32;
            case GGML_SCALE_MODE_BICUBIC:
                return ctx->device->pipeline_upscale_bicubic_f32;
            default:
                return nullptr;
            }
        }
        return nullptr;
    case GGML_OP_SCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_scale_f32;
        }
        return nullptr;
    case GGML_OP_SQR:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqr_f32;
        }
        return nullptr;
    case GGML_OP_SQRT:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqrt_f32;
        }
        return nullptr;
    case GGML_OP_SIN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sin_f32;
        }
        return nullptr;
    case GGML_OP_COS:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_cos_f32;
        }
        return nullptr;
    case GGML_OP_LOG:
        if (src0->type == dst->type &&
            (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16)) {
            return ctx->device->pipeline_log[dst->type == GGML_TYPE_F16];
        }
        return nullptr;
    case GGML_OP_TRI:
        if (src0->type == dst->type &&
            (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16)) {
            return ctx->device->pipeline_tri[dst->type == GGML_TYPE_F16];
        }
        return nullptr;
    case GGML_OP_CLAMP:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_clamp_f32;
        }
        return nullptr;
    case GGML_OP_PAD:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_pad_f32;
        }
        return nullptr;
    case GGML_OP_ROLL:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_roll_f32;
        }
        return nullptr;
    case GGML_OP_REPEAT:
        if (ggml_type_size(src0->type) == sizeof(float) && ggml_type_size(dst->type) == sizeof(float)) {
            return ctx->device->pipeline_repeat_f32;
        }
        return nullptr;
    case GGML_OP_REPEAT_BACK:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_repeat_back_f32;
        }
        return nullptr;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        return ggml_vk_get_cpy_pipeline(ctx, src0, dst, dst->type);
    case GGML_OP_SET_ROWS:
        if (src1->type == GGML_TYPE_I64) {
            return ctx->device->pipeline_set_rows_i64[dst->type];
        } else {
            return ctx->device->pipeline_set_rows_i32[dst->type];
        }
    case GGML_OP_SILU_BACK:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_silu_back_f32;
        }
        return nullptr;
    case GGML_OP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_norm_f32;
        }
        return nullptr;
    case GGML_OP_GROUP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_group_norm_f32;
        }
        return nullptr;
    case GGML_OP_RMS_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            if (ctx->do_add_rms_partials) {
                return ctx->num_additional_fused_ops > 0 ? ctx->device->pipeline_rms_norm_mul_partials_f32 : ctx->device->pipeline_rms_norm_partials_f32;
            } else {
                return ctx->num_additional_fused_ops > 0 ? ctx->device->pipeline_rms_norm_mul_f32 : ctx->device->pipeline_rms_norm_f32;
            }
        }
        return nullptr;
    case GGML_OP_RMS_NORM_BACK:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rms_norm_back_f32;
        }
        return nullptr;
    case GGML_OP_L2_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_l2_norm_f32;
        }
        return nullptr;
    case GGML_OP_UNARY:
        if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) ||
            (dst->type != GGML_TYPE_F32 && dst->type != GGML_TYPE_F16) ||
            (src0->type != dst->type)) {
            return nullptr;
        }
        switch (ggml_get_unary_op(dst)) {
        case GGML_UNARY_OP_EXP:
            return ctx->device->pipeline_exp[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_SILU:
            return ctx->device->pipeline_silu[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_GELU:
            return ctx->device->pipeline_gelu[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_GELU_ERF:
            return ctx->device->pipeline_gelu_erf[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_GELU_QUICK:
            return ctx->device->pipeline_gelu_quick[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_RELU:
            return ctx->device->pipeline_relu[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_NEG:
            return ctx->device->pipeline_neg[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_TANH:
            return ctx->device->pipeline_tanh[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_SIGMOID:
            return ctx->device->pipeline_sigmoid[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_HARDSIGMOID:
            return ctx->device->pipeline_hardsigmoid[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_HARDSWISH:
            return ctx->device->pipeline_hardswish[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_ABS:
            return ctx->device->pipeline_abs[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_SOFTPLUS:
            return ctx->device->pipeline_softplus[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_STEP:
            return ctx->device->pipeline_step[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_ROUND:
            return ctx->device->pipeline_round[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_CEIL:
            return ctx->device->pipeline_ceil[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_FLOOR:
            return ctx->device->pipeline_floor[dst->type == GGML_TYPE_F16];
        case GGML_UNARY_OP_TRUNC:
            return ctx->device->pipeline_trunc[dst->type == GGML_TYPE_F16];
        default:
            break;
        }
        return nullptr;
    case GGML_OP_GLU:
        if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) ||
            (dst->type != GGML_TYPE_F32 && dst->type != GGML_TYPE_F16) ||
            (src0->type != dst->type)) {
            return nullptr;
        }
        switch (ggml_get_glu_op(dst)) {
        case GGML_GLU_OP_GEGLU:
            return ctx->device->pipeline_geglu[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_REGLU:
            return ctx->device->pipeline_reglu[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_SWIGLU:
            return ctx->device->pipeline_swiglu[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_SWIGLU_OAI:
            return ctx->device->pipeline_swiglu_oai[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_GEGLU_ERF:
            return ctx->device->pipeline_geglu_erf[dst->type == GGML_TYPE_F16];
        case GGML_GLU_OP_GEGLU_QUICK:
            return ctx->device->pipeline_geglu_quick[dst->type == GGML_TYPE_F16];
        default:
            break;
        }
        return nullptr;
    case GGML_OP_DIAG_MASK_INF:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_diag_mask_inf_f32;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX:
        GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);
        GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32);
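        // Fused softmax + top-k expert selection (MoE routing): the pipeline is indexed
        // by ceil(log2) of the number of experts in dst->ne[0].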
        if (ctx->num_additional_fused_ops) {
            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
            GGML_ASSERT(idx < num_topk_moe_pipelines);
            topk_moe_mode mode = ggml_vk_num_additional_ops_to_topk_moe_mode(ctx->num_additional_fused_ops);
            return ctx->device->pipeline_topk_moe[idx][mode];
        }
        if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
            return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_wg512 : ctx->device->pipeline_soft_max_f32;
        }
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_f16_wg512 : ctx->device->pipeline_soft_max_f32_f16;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX_BACK:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_back_f32;
        }
        return nullptr;
    case GGML_OP_ROPE:
    case GGML_OP_ROPE_BACK:
        {
            const ggml_tensor *rope = ctx->num_additional_fused_ops == 2 ? dst->src[0]->src[0] : dst;
            const int mode = ((const int32_t *) rope->op_params)[2];
            const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
            const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE;
            const bool is_vision = mode == GGML_ROPE_TYPE_VISION;

            if (is_neox) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_neox_f32;
                }
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f32_f16;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f16;
                }
            } else if (is_mrope && !is_vision) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_multi_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_multi_f16;
                }
            } else if (is_vision) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_vision_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_vision_f16;
                }
            } else {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_norm_f32;
                }
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f32_f16;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f16;
                }
            }
            return nullptr;
        }
    case GGML_OP_SUM:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_MEAN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sum_rows_f32;
        }
        return nullptr;
    case GGML_OP_CUMSUM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_cumsum_f32;
        }
        return nullptr;
    case GGML_OP_SOLVE_TRI:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            vk_solve_tri_pipeline_state solve_tri_pipeline_state(src0->ne[0], src1->ne[0]);

            vk_pipeline pipeline = nullptr;
            {
                std::lock_guard<std::recursive_mutex> guard(ctx->device->mutex);
                auto it = ctx->device->pipeline_solve_tri_f32.find(solve_tri_pipeline_state);
                if (it != ctx->device->pipeline_solve_tri_f32.end()) {
                    pipeline = it->second;
                } else {
                    ctx->device->pipeline_solve_tri_f32[solve_tri_pipeline_state] = pipeline = std::make_shared<vk_pipeline_struct>();
                }
            }
            return pipeline;
        }
        return nullptr;
    case GGML_OP_ARGMAX:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_argmax_f32;
        }
        return nullptr;
    case GGML_OP_COUNT_EQUAL:
        if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I64) {
            return ctx->device->pipeline_count_equal_i32;
        }
        return nullptr;
    case GGML_OP_IM2COL:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_im2col_f32;
        }
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_im2col_f32_f16;
        }
        return nullptr;
    case GGML_OP_IM2COL_3D:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_im2col_3d_f32;
        }
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_im2col_3d_f32_f16;
        }
        return nullptr;
    case GGML_OP_TIMESTEP_EMBEDDING:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_timestep_embedding_f32;
        }
        return nullptr;
    case GGML_OP_CONV_TRANSPOSE_1D:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_conv_transpose_1d_f32;
        }
        return nullptr;
    case GGML_OP_POOL_2D:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_pool2d_f32;
        }
        return nullptr;
    case GGML_OP_RWKV_WKV6:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rwkv_wkv6_f32;
        }
        return nullptr;
    case GGML_OP_RWKV_WKV7:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rwkv_wkv7_f32;
        }
        return nullptr;
    case GGML_OP_SSM_SCAN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            const uint32_t d_state = src0->ne[0];
            if (d_state == 128) {
                return ctx->device->pipeline_ssm_scan_f32_d128;
            } else if (d_state == 256) {
                return ctx->device->pipeline_ssm_scan_f32_d256;
            }
        }
        return nullptr;
    case GGML_OP_SSM_CONV:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_ssm_conv_f32;
        }
        return nullptr;
    case GGML_OP_OPT_STEP_ADAMW:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_opt_step_adamw_f32;
        }
        return nullptr;
    case GGML_OP_OPT_STEP_SGD:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_opt_step_sgd_f32;
        }
        return nullptr;
    case GGML_OP_LEAKY_RELU:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_leaky_relu_f32;
        }
        return nullptr;
    case GGML_OP_CONV_2D:
    case GGML_OP_CONV_TRANSPOSE_2D:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 &&
            ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) {
            std::array<uint32_t, 3> elements{};
            if (op == GGML_OP_CONV_2D) elements = ggml_vk_get_conv_elements(dst);
            else if (op == GGML_OP_CONV_TRANSPOSE_2D) elements = ggml_vk_get_conv_transpose_2d_elements(dst);

            vk_conv_shapes shape;
            uint32_t tiles[CONV_SHAPE_COUNT];
            for (uint32_t i = 0; i < CONV_SHAPE_COUNT; ++i) {
                tiles[i] = CEIL_DIV(elements[0], conv_shapes_wg_denoms[i][0]) * CEIL_DIV(elements[1], conv_shapes_wg_denoms[i][1]);
            }

            // We can't query number of shader cores on Intel, use 32 as a placeholder
            // so small convolutions will still choose a smaller tile.
            const uint32_t shader_core_count = ctx->device->shader_core_count > 0 ? ctx->device->shader_core_count : 32;

            if (elements[0] > 64 && tiles[CONV_SHAPE_128x128] >= shader_core_count * 2) {
                shape = CONV_SHAPE_128x128;
            } else if (elements[0] <= 32 && tiles[CONV_SHAPE_32x256] >= shader_core_count * 2) {
                shape = CONV_SHAPE_32x256;
            } else {
                shape = CONV_SHAPE_64x32;
            }

            uint32_t KW = static_cast<uint32_t>(src0->ne[0]);
            uint32_t KH = static_cast<uint32_t>(src0->ne[1]);
            uint32_t s0 = static_cast<uint32_t>(dst->op_params[0]);
            uint32_t s1 = op == GGML_OP_CONV_2D ? static_cast<uint32_t>(dst->op_params[1]) : static_cast<uint32_t>(dst->op_params[0]);
            uint32_t p0 = op == GGML_OP_CONV_2D ? static_cast<uint32_t>(dst->op_params[2]) : 0;
            uint32_t p1 = op == GGML_OP_CONV_2D ? static_cast<uint32_t>(dst->op_params[3]) : 0;
            uint32_t d0 = op == GGML_OP_CONV_2D ? static_cast<uint32_t>(dst->op_params[4]) : 1;
            uint32_t d1 = op == GGML_OP_CONV_2D ? static_cast<uint32_t>(dst->op_params[5]) : 1;
            vk_conv2d_pipeline_state conv2d_pipeline_state(s0, s1, p0, p1, d0, d1, KW, KH);

            std::map<vk_conv2d_pipeline_state, vk_pipeline> *pipelines = nullptr;
            if (op == GGML_OP_CONV_2D) {
                if (src0->type == GGML_TYPE_F32) {
                    pipelines = &ctx->device->pipeline_conv2d_f32[shape];
                } else if (src0->type == GGML_TYPE_F16) {
                    pipelines = &ctx->device->pipeline_conv2d_f16_f32[shape];
                }
            } else if (op == GGML_OP_CONV_TRANSPOSE_2D) {
                if (src0->type == GGML_TYPE_F32) {
                    pipelines = &ctx->device->pipeline_conv_transpose_2d_f32[shape];
                } else if (src0->type == GGML_TYPE_F16) {
                    pipelines = &ctx->device->pipeline_conv_transpose_2d_f16_f32[shape];
                }
            }

            vk_pipeline pipeline = nullptr;
            {
                std::lock_guard<std::recursive_mutex> guard(ctx->device->mutex);
                auto it = pipelines->find(conv2d_pipeline_state);
                if (it != pipelines->end()) {
                    pipeline = it->second;
                } else {
                    (*pipelines)[conv2d_pipeline_state] = pipeline = std::make_shared<vk_pipeline_struct>();
                }
            }
            return pipeline;
        }
        return nullptr;
    case GGML_OP_CONV_2D_DW:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            if (ggml_is_contiguous(src1)) {
                return ctx->device->pipeline_conv2d_dw_whcn_f32;
            } else if (ggml_is_contiguous_channels(src1)) {
                return ctx->device->pipeline_conv2d_dw_cwhn_f32;
            }
        } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            if (ggml_is_contiguous(src1)) {
                return ctx->device->pipeline_conv2d_dw_whcn_f16_f32;
            } else if (ggml_is_contiguous_channels(src1)) {
                return ctx->device->pipeline_conv2d_dw_cwhn_f16_f32;
            }
        }
        return nullptr;
    case GGML_OP_ADD1:
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_add1_f16_f16;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_add1_f16_f32;
        }
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_add1_f32_f32;
        }
        return nullptr;
    case GGML_OP_ARANGE:
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_arange_f32;
        }
        return nullptr;
    case GGML_OP_FILL:
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_fill_f32;
        }
        return nullptr;
    default:
        return nullptr;
    }

    GGML_UNUSED(src2);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_unary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
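    // Both offsets share one push-constant word: src0 in the high 16 bits, dst in the low 16.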
    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_sum_rows_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_pad_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_im2col_3d_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
    p.misalign_offsets = (a_offset << 16) | d_offset;

    GGML_UNUSED(src0);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_binary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);

    GGML_ASSERT(dst->op != GGML_OP_GET_ROWS || (a_offset == 0 && b_offset == 0 && d_offset == 0));
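    // Three offsets share one word here: src0 << 16, src1 << 8, dst in the low byte.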
    p.misalign_offsets = (a_offset << 16) | (b_offset << 8) | d_offset;

    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_upscale_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);

    p.a_offset = a_offset;
    p.d_offset = d_offset;

    GGML_UNUSED(src1);
    GGML_UNUSED(src2);
    GGML_UNUSED(src3);
}

template<typename PC>
static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst, ggml_op op, PC&& pc) {
    VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    if (src1 != nullptr) {
        std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    }
    if (src2 != nullptr) {
        std::cerr << "), (" << src2 << ", name=" << src2->name << ", type=" << src2->type << ", ne0=" << src2->ne[0] << ", ne1=" << src2->ne[1] << ", ne2=" << src2->ne[2] << ", ne3=" << src2->ne[3] << ", nb0=" << src2->nb[0] << ", nb1=" << src2->nb[1] << ", nb2=" << src2->nb[2] << ", nb3=" << src2->nb[3];
    }
    if (src3 != nullptr) {
        std::cerr << "), (" << src3 << ", name=" << src3->name << ", type=" << src3->type << ", ne0=" << src3->ne[0] << ", ne1=" << src3->ne[1] << ", ne2=" << src3->ne[2] << ", ne3=" << src3->ne[3] << ", nb0=" << src3->nb[0] << ", nb1=" << src3->nb[1] << ", nb2=" << src3->nb[2] << ", nb3=" << src3->nb[3];
    }
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << ggml_op_name(op) << ")");
    GGML_ASSERT(op == GGML_OP_GET_ROWS || op == GGML_OP_CPY || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
    GGML_ASSERT(dst->buffer != nullptr);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const bool use_src1 = src1 != nullptr;
    const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const bool use_src2 = src2 != nullptr;
    const bool use_src3 = src3 != nullptr;

    init_pushconst_fastdiv(pc);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);

    if (pipeline == nullptr) {
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
        if (src1 != nullptr) {
            std::cerr << " and " << ggml_type_name(src1->type);
        }
        std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
        GGML_ABORT("fatal error");
    }

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    vk_subbuffer src0_buf = ggml_vk_tensor_subbuffer(ctx, src0, true);
    vk_subbuffer src1_buf = use_src1 ? ggml_vk_tensor_subbuffer(ctx, src1, true) : vk_subbuffer{};
    vk_subbuffer src2_buf = use_src2 ? ggml_vk_tensor_subbuffer(ctx, src2, true) : vk_subbuffer{};
    vk_subbuffer src3_buf = use_src3 ? ggml_vk_tensor_subbuffer(ctx, src3, true) : vk_subbuffer{};
    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst, true);

    // Compute misalignment offset for descriptors and store it in push constants.
    init_pushconst_tensor_offsets(ctx, pc, src0, src1, src2, src3, dst);

    std::array<uint32_t, 3> elements;

    switch (op) {
    case GGML_OP_NORM:
    case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_L2_NORM:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_CUMSUM:
    case GGML_OP_MEAN:
    case GGML_OP_ARGMAX:
        {
            const uint32_t nr = ggml_nrows(src0);
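            // Spread the rows over x/y/z (512 * 512 = 262144 per z slice) to stay
            // under the per-dimension workgroup-count limits.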
            if (nr > 262144) {
                elements = { 512, 512, CEIL_DIV(nr, 262144) };
            } else if (nr > 512) {
                elements = { 512, CEIL_DIV(nr, 512), 1 };
            } else {
                elements = { nr, 1, 1 };
            }
        } break;
    case GGML_OP_SOLVE_TRI:
        {
            uint32_t nr = (uint32_t)(ne02 * ne03);
            if (nr > 262144) {
                elements = { 512, 512, CEIL_DIV(nr, 262144) };
            } else if (nr > 512) {
                elements = { 512, CEIL_DIV(nr, 512), 1 };
            } else {
                elements = { nr, 1, 1 };
            }
        }
        break;
    case GGML_OP_RMS_NORM:
        if (ctx->do_add_rms_partials) {
            // Run one element per thread, 128 threads per workgroup
            elements = { (uint32_t)CEIL_DIV(ne00, 128), 1, 1 };
        } else {
            elements = { (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne03 };
        }
        break;
    case GGML_OP_SUM:
        // We use GGML_OP_SUM_ROWS with 1 row.
        elements = { 1, 1, 1 };
        break;
    case GGML_OP_GROUP_NORM:
        {
            const uint32_t num_groups = dst->op_params[0];
            elements = { num_groups * (uint32_t)src0->ne[3], 1, 1 };
        } break;
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_ROPE:
    case GGML_OP_ROPE_BACK:
        elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
        break;
    case GGML_OP_GET_ROWS:
        elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
        elements[1] = std::min(elements[1], ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
        elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
        break;
    case GGML_OP_ARGSORT:
        GGML_ASSERT(0);
        break;
    case GGML_OP_IM2COL:
        {
            const bool is_2D = dst->op_params[6] == 1;

            const uint32_t IC = src1->ne[is_2D ? 2 : 1];

            const uint32_t KH = is_2D ? src0->ne[1] : 1;
            const uint32_t KW = src0->ne[0];

            const uint32_t OH = is_2D ? dst->ne[2] : 1;
            const uint32_t OW = dst->ne[1];

            const uint32_t batch = src1->ne[is_2D ? 3 : 2];

            elements = { OW * KW * KH, OH, batch * IC };
        } break;
    case GGML_OP_IM2COL_3D:
        {
            const uint32_t IC = ((const uint32_t *)(dst->op_params))[9];

            const uint32_t N = ne13 / IC;

            const uint32_t KD = ne02;
            const uint32_t KH = ne01;
            const uint32_t KW = ne00;

            const uint32_t OD = dst->ne[3] / N;
            const uint32_t OH = dst->ne[2];
            const uint32_t OW = dst->ne[1];

            const uint32_t IC_KD_KH_KW = IC*KD*KH*KW;
            const uint32_t N_OD_OH = N*OD*OH;

            elements = { IC_KD_KH_KW, OW, N_OD_OH };
            elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
        } break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        {
            const uint32_t dim = dst->op_params[0];
            uint32_t half_ceil = (dim + 1) / 2;
            elements = { half_ceil, (uint32_t)src0->ne[0], 1 };
        } break;
    case GGML_OP_CONV_TRANSPOSE_1D:
        {
            elements = {uint32_t(src0->ne[1]), 1, 1}; // parallelize in {Cout, 1, 1}
        } break;
    case GGML_OP_POOL_2D:
        {
            const uint32_t N = dst->ne[3];
            const uint32_t OC = dst->ne[2];
            const uint32_t OH = dst->ne[1];
            const uint32_t OW = dst->ne[0];
            elements = { N * OC * OH * OW, 1, 1};
        } break;
    case GGML_OP_CONV_2D:
        {
            elements = ggml_vk_get_conv_elements(dst);
        } break;
    case GGML_OP_CONV_TRANSPOSE_2D:
        {
            elements = ggml_vk_get_conv_transpose_2d_elements(dst);
        } break;
    case GGML_OP_ADD:
    case GGML_OP_SUB:
    case GGML_OP_DIV:
    case GGML_OP_MUL:
    case GGML_OP_ADD1:
    case GGML_OP_ARANGE:
    case GGML_OP_FILL:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SQRT:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_LOG:
    case GGML_OP_TRI:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_ROLL:
    case GGML_OP_REPEAT:
    case GGML_OP_REPEAT_BACK:
    case GGML_OP_CPY:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_UNARY:
    case GGML_OP_GLU:
    case GGML_OP_CONV_2D_DW:
        {
            uint32_t ne = ggml_nelements(dst);
            if (op == GGML_OP_CPY && ggml_is_quantized(src0->type) && ggml_is_quantized(dst->type)) {
                // Convert from number of logical elements to 2- or 4-byte units.
                ne /= ggml_blck_size(src0->type);
                if ((ggml_type_size(src0->type) % 4) == 0) {
                    ne *= ggml_type_size(src0->type) / 4;
                } else {
                    ne *= ggml_type_size(src0->type) / 2;
                }
            }
            // copy_to_quant has block size of 32, and each thread does QUANT_K elements.
            // Splitting into 512x512xZ wouldn't work well since each workgroup does 1024 elements.
            // So divide by block size here before splitting into 512x512 groups.
            if (op == GGML_OP_CPY && !ggml_is_quantized(src0->type) && ggml_is_quantized(dst->type)) {
                ne = CEIL_DIV(ne, ggml_blck_size(dst->type));
            }
            if (ne > 262144) {
                elements = { 512, 512, CEIL_DIV(ne, 262144) };
            } else if (ne > 512) {
                elements = { 512, CEIL_DIV(ne, 512), 1 };
            } else {
                elements = { ne, 1, 1 };
            }
            if (pipeline == ctx->device->pipeline_cpy_transpose_32 ||
                pipeline == ctx->device->pipeline_cpy_transpose_16) {
                // 32x32 tiles
                elements[0] = (uint32_t)CEIL_DIV(dst->ne[0], 32);
                elements[1] = (uint32_t)CEIL_DIV(dst->ne[1], 32);
                elements[2] = (uint32_t)(dst->ne[2]*dst->ne[3]);
                elements[0] = std::min(elements[0], ctx->device->properties.limits.maxComputeWorkGroupCount[0]);
                elements[1] = std::min(elements[1], ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
                elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
            }
        } break;
    case GGML_OP_ADD_ID:
        {
            elements = { (uint32_t)ne01, (uint32_t)ne02, 1 };
        } break;
    case GGML_OP_SET_ROWS:
        {
            uint32_t ne = ggml_nelements(src0);
            if (ggml_is_quantized(dst->type)) {
                // quants run 32 threads each doing QUANT_K elements
                ne = CEIL_DIV(ne, 32 * ggml_blck_size(dst->type));
            } else {
                // scalar types do one element per thread, running 512 threads
                ne = CEIL_DIV(ne, 512);
            }
            if (ne > 262144) {
                elements = { 512, 512, CEIL_DIV(ne, 262144) };
            } else if (ne > 512) {
                elements = { 512, CEIL_DIV(ne, 512), 1 };
            } else {
                elements = { ne, 1, 1 };
            }
        }
        break;
    case GGML_OP_SSM_CONV:
        {
            const uint32_t nr = src0->ne[1];
            const uint32_t n_t = dst->ne[1];
            const uint32_t n_s = dst->ne[2];
            elements = { nr, n_t, n_s };
        }
        break;
    default:
        elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
        break;
    }

    if (op == GGML_OP_ADD || op == GGML_OP_RMS_NORM) {
        vk_subbuffer a_buf = src0_buf;
        if (ctx->do_add_rms_partials) {
            a_buf = ggml_vk_subbuffer(ctx, ctx->prealloc_add_rms_partials, ctx->prealloc_size_add_rms_partials_offset);
        }
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
                                  { src0_buf, src1_buf, dst_buf, a_buf }, pc, elements);
    } else if (op == GGML_OP_GLU) {
        // Empty src1 is possible in glu, but the shader needs a buffer
        vk_subbuffer subbuf1 = use_src1 ? src1_buf : src0_buf;
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, subbuf1, dst_buf }, pc, elements);
    } else if (op == GGML_OP_SOFT_MAX) {
        // Empty src1 and src2 is possible in soft_max, but the shader needs a buffer
        vk_subbuffer subbuf1 = use_src1 ? src1_buf : src0_buf;
        vk_subbuffer subbuf2 = use_src2 ? src2_buf : src0_buf;
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, subbuf1, subbuf2, dst_buf }, pc, elements);
    } else if (op == GGML_OP_ROPE || op == GGML_OP_ROPE_BACK) {
        // Empty src2 and src3 is possible in rope, but the shader needs a buffer
        vk_subbuffer subbuf2 = use_src2 ? src2_buf : src0_buf;
        vk_subbuffer subbuf3 = use_src3 ? src3_buf : src0_buf;
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, src1_buf, subbuf2, dst_buf, subbuf3 }, pc, elements);
    } else if (op == GGML_OP_IM2COL || op == GGML_OP_IM2COL_3D) {
        if (ctx->device->shader_int64 && ctx->device->buffer_device_address) {
            // buffer device address path doesn't use dst buffer
            dst_buf.size = 1;
        }
        // im2col uses only src1 and dst buffers
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src1_buf, dst_buf }, pc, elements);
    } else if (op == GGML_OP_COUNT_EQUAL) {
        // count_equal assumes that destination buffer is initialized with zeroes
        ggml_vk_buffer_memset_async(subctx, dst_buf.buffer, dst_buf.offset, 0, dst_buf.size);
        ggml_vk_sync_buffers(ctx, subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, src1_buf, dst_buf }, pc, elements);
    } else if (op == GGML_OP_OPT_STEP_SGD) {
        // OPT_STEP_SGD works on src0, it does not need dst
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, src1_buf, src2_buf }, pc, elements);
    } else if (use_src3) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, src1_buf, src2_buf, src3_buf, dst_buf }, pc, elements);
    } else if (use_src2) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, src1_buf, src2_buf, dst_buf }, pc, elements);
    } else if (use_src1) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, src1_buf, dst_buf }, pc, elements);
    } else {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, dst_buf }, pc, elements);
    }
}

static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);
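    // The ne/nb values below are passed to the shader in elements (byte strides divided
    // by the type size), not bytes.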
  7889. ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_GET_ROWS, {
  7890. (uint32_t)ggml_nelements(src0),
  7891. (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
  7892. (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
  7893. (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
  7894. 0,
  7895. 0.0f, 0.0f, 0,
  7896. });
  7897. }
  7898. static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  7899. const uint32_t src0_type_size = ggml_type_size(src0->type);
  7900. const uint32_t src1_type_size = ggml_type_size(src1->type);
  7901. const uint32_t dst_type_size = ggml_type_size(dst->type);
  7902. int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
  7903. int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
  7904. // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
  7905. int offset = dst->op_params[3] / 4; // offset in bytes
  7906. ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_ACC, {
  7907. (uint32_t)ggml_nelements(src0),
  7908. (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t)src0->nb[3] / src0_type_size,
  7909. (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
  7910. (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t) dst->nb[3] / dst_type_size,
  7911. 0,
  7912. 0.0f, 0.0f, offset,
  7913. });
  7914. }

static void ggml_vk_multi_add(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_cgraph * cgraph, int node_idx) {
    const ggml_tensor *first_node = cgraph->nodes[node_idx];
    const ggml_tensor *dst = cgraph->nodes[node_idx + ctx->num_additional_fused_ops];

    // Make a list of all the tensors used by the op.
    // Last element of the list is the dest tensor.
    const ggml_tensor *tensors[MAX_PARAMETER_COUNT];
    uint32_t num_srcs = ctx->num_additional_fused_ops + 2;
    uint32_t num_tensors = num_srcs + 1;
    GGML_ASSERT(num_tensors + ctx->do_add_rms_partials <= MAX_PARAMETER_COUNT);

    tensors[0] = first_node->src[0];
    tensors[1] = first_node->src[1];
    for (int32_t i = 0; i < ctx->num_additional_fused_ops; ++i) {
        // check whether the previous result is src[0] or src[1]
        if (cgraph->nodes[node_idx + i] == cgraph->nodes[node_idx + i + 1]->src[0]) {
            tensors[i + 2] = cgraph->nodes[node_idx + i + 1]->src[1];
        } else {
            tensors[i + 2] = cgraph->nodes[node_idx + i + 1]->src[0];
        }
    }
    tensors[num_srcs] = dst;

    vk_op_multi_add_push_constants pc;
    pc.ne20 = (uint32_t)dst->ne[0];
    pc.ne21 = (uint32_t)dst->ne[1];
    pc.ne22 = (uint32_t)dst->ne[2];
    pc.ne23 = (uint32_t)dst->ne[3];

    for (uint32_t i = 0; i < num_tensors; ++i) {
        const ggml_tensor *t = tensors[i];
        pc.nb[i][0] = (uint32_t)t->nb[0] / sizeof(float);
        pc.nb[i][1] = (uint32_t)t->nb[1] / sizeof(float);
        pc.nb[i][2] = (uint32_t)t->nb[2] / sizeof(float);
        pc.nb[i][3] = (uint32_t)t->nb[3] / sizeof(float);
    }
    pc.rms_partials = ctx->do_add_rms_partials;

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, tensors[0], tensors[1], nullptr, dst, dst->op);

    if (pipeline == nullptr) {
        std::cerr << "ggml_vulkan: Error: Missing multi_add";
        GGML_ABORT("fatal error");
    }

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    ggml_backend_vk_buffer_context * buf_ctx[MAX_PARAMETER_COUNT];
    vk_buffer buf[MAX_PARAMETER_COUNT];
    size_t offset[MAX_PARAMETER_COUNT];
    bool uma[MAX_PARAMETER_COUNT];

    for (uint32_t i = 0; i < num_tensors; ++i) {
        buf_ctx[i] = (ggml_backend_vk_buffer_context *)tensors[i]->buffer->context;
        buf[i] = nullptr;
        offset[i] = 0;
        uma[i] = false;

        if (ctx->device->uma) {
            ggml_vk_host_get(ctx->device, tensors[i]->data, buf[i], offset[i]);
            uma[i] = buf[i] != nullptr;
        }
        if (!uma[i]) {
            buf[i] = buf_ctx[i]->dev_buffer;
            offset[i] = vk_tensor_offset(tensors[i]) + tensors[i]->view_offs;
        }
        GGML_ASSERT(buf[i] != nullptr);
    }
    // If any remaining descriptors are unused, just point them at src[0]
    for (uint32_t i = num_tensors; i < MAX_PARAMETER_COUNT; ++i) {
        buf[i] = buf[0];
        offset[i] = 0;
    }
    if (ctx->do_add_rms_partials) {
        buf[num_tensors] = ctx->prealloc_add_rms_partials;
        offset[num_tensors] = ctx->prealloc_size_add_rms_partials_offset;
    }

    std::array<uint32_t, 3> elements;

    uint32_t ne = ggml_nelements(dst);
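    // The flat element count is spread over a 3D grid, capping x and y at 512
    // each so large tensors spill into z: 512 * 512 = 262144 elements per
    // z-slice, e.g. ne = 1,000,000 dispatches { 512, 512, CEIL_DIV(1000000, 262144) = 4 }.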
    if (ne > 262144) {
        elements = { 512, 512, CEIL_DIV(ne, 262144) };
    } else if (ne > 512) {
        elements = { 512, CEIL_DIV(ne, 512), 1 };
    } else {
        elements = { ne, 1, 1 };
    }

    static_assert(MAX_PARAMETER_COUNT == 12);
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
        {
            ggml_vk_subbuffer(ctx, buf[0], offset[0]),
            ggml_vk_subbuffer(ctx, buf[1], offset[1]),
            ggml_vk_subbuffer(ctx, buf[2], offset[2]),
            ggml_vk_subbuffer(ctx, buf[3], offset[3]),
            ggml_vk_subbuffer(ctx, buf[4], offset[4]),
            ggml_vk_subbuffer(ctx, buf[5], offset[5]),
            ggml_vk_subbuffer(ctx, buf[6], offset[6]),
            ggml_vk_subbuffer(ctx, buf[7], offset[7]),
            ggml_vk_subbuffer(ctx, buf[8], offset[8]),
            ggml_vk_subbuffer(ctx, buf[9], offset[9]),
            ggml_vk_subbuffer(ctx, buf[10], offset[10]),
            ggml_vk_subbuffer(ctx, buf[11], offset[11]),
        }, pc, elements);
}

static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_ADD, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, ctx->do_add_rms_partials,
    });
}

static void ggml_vk_sub(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SUB, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_MUL, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_DIV, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_add_id(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t src2_type_size = ggml_type_size(src2->type);

    ggml_vk_op_f32<vk_op_add_id_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_ADD_ID, {
        (uint32_t)dst->ne[0],
        (uint32_t)dst->ne[1],
        (uint32_t)src0->nb[1] / src0_type_size,
        (uint32_t)src0->nb[2] / src0_type_size,
        (uint32_t)src1->nb[1] / src1_type_size,
        (uint32_t)src2->nb[1] / src2_type_size,
    });
}

static void ggml_vk_op_f32_wkv(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_rwkv_wkv6_push_constants&& pc, int version) {
    GGML_ASSERT(version == 6 || version == 7);
    int num_srcs = version == 6 ? 6 : 7;

    for (int i = 0; i < num_srcs; i++) {
        GGML_ASSERT(!ggml_is_quantized(dst->src[i]->type));
    }

    GGML_ASSERT(dst->buffer != nullptr);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, dst->src[0], dst->src[1], dst->src[2], dst, dst->op);
    GGML_ASSERT(pipeline != nullptr);

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst);
    vk_subbuffer src_buf[7] = {};
    for (int i = 0; i < num_srcs; i++) {
        src_buf[i] = ggml_vk_tensor_subbuffer(ctx, dst->src[i]);
    }

    std::array<uint32_t, 3> elements = {
        (uint32_t)(pc.B * pc.H),
        1,
        1
    };

    if (version == 6) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
            {src_buf[0], src_buf[1], src_buf[2], src_buf[3], src_buf[4], src_buf[5], dst_buf},
            pc, elements);
    } else if (version == 7) {
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
            {src_buf[0], src_buf[1], src_buf[2], src_buf[3], src_buf[4], src_buf[5], src_buf[6], dst_buf},
            pc, elements);
    } else {
        // shouldn't happen
        GGML_ASSERT(false);
    }
}

static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    const size_t seq_length = dst->src[0]->ne[2];
    const size_t n_embed = dst->ne[0];
    const size_t n_heads = dst->src[0]->ne[1];
    const size_t n_seqs = dst->src[5]->ne[1];

    ggml_vk_op_f32_wkv(
        ctx, subctx, dst,
        {
            (uint32_t)n_seqs,
            (uint32_t)seq_length,
            (uint32_t)n_embed,
            (uint32_t)n_heads,
        },
        6
    );
}

static void ggml_vk_rwkv_wkv7(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    const size_t seq_length = dst->src[0]->ne[2];
    const size_t n_embed = dst->ne[0];
    const size_t n_heads = dst->src[0]->ne[1];
    const size_t n_seqs = dst->src[6]->ne[1];

    ggml_vk_op_f32_wkv(
        ctx, subctx, dst,
        {
            (uint32_t)n_seqs,
            (uint32_t)seq_length,
            (uint32_t)n_embed,
            (uint32_t)n_heads,
        },
        7
    );
}

static void ggml_vk_ssm_scan(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    const ggml_tensor * src2 = dst->src[2];
    const ggml_tensor * src3 = dst->src[3];
    const ggml_tensor * src4 = dst->src[4];
    const ggml_tensor * src5 = dst->src[5];

    GGML_ASSERT(dst->buffer != nullptr);

    const uint32_t head_dim = src0->ne[1];
    const uint32_t n_head = src1->ne[1];
    const uint32_t n_group = src4->ne[1];
    const uint32_t n_tok = src1->ne[2];
    const uint32_t n_seq = src1->ne[3];

    bool is_mamba2 = (src3->nb[1] == sizeof(float));
    GGML_ASSERT(is_mamba2);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, dst->op);
    GGML_ASSERT(pipeline != nullptr);

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    const int64_t s_off = ggml_nelements(src1) * sizeof(float);

    const vk_op_ssm_scan_push_constants pc = {
        (uint32_t)src0->nb[2], (uint32_t)src0->nb[3],
        (uint32_t)src1->nb[2], (uint32_t)src1->nb[3],
        (uint32_t)src2->nb[1], (uint32_t)src2->nb[2],
        (uint32_t)src3->nb[1],
        (uint32_t)src4->nb[2], (uint32_t)src4->nb[3],
        (uint32_t)src5->nb[2], (uint32_t)src5->nb[3],
        (uint32_t)s_off,
        n_head, head_dim, n_group, n_tok
    };

    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst);
    vk_subbuffer src_buf[7] = {};
    for (int i = 0; i < 7 && dst->src[i] != nullptr; i++) {
        src_buf[i] = ggml_vk_tensor_subbuffer(ctx, dst->src[i]);
    }

    std::array<uint32_t, 3> elements;

    const int splitH = 16;
    const uint32_t num_workgroups_x = CEIL_DIV(n_head * head_dim, splitH);
    const uint32_t num_workgroups_y = n_seq;
    elements = { num_workgroups_x, num_workgroups_y, 1 };

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
        {src_buf[0], src_buf[1], src_buf[2], src_buf[3], src_buf[4], src_buf[5], src_buf[6], dst_buf},
        pc, elements);
}

static void ggml_vk_ssm_conv(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    ggml_vk_op_f32<vk_op_ssm_conv_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SSM_CONV, {
        (uint32_t)src0->nb[1], (uint32_t)src0->nb[2],
        (uint32_t)src1->nb[1],
        (uint32_t)dst->nb[0], (uint32_t)dst->nb[1], (uint32_t)dst->nb[2],
        (uint32_t)src1->ne[0],
        (uint32_t)src0->ne[0],
        (uint32_t)src0->ne[1],
        (uint32_t)dst->ne[1],
        (uint32_t)dst->ne[2],
    });
}

static void ggml_vk_op_f32_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_push_constants&& pc) {
    const ggml_tensor * x = dst->src[0];
    const ggml_tensor * g = dst->src[1];
    const ggml_tensor * gm = dst->src[2];
    const ggml_tensor * gv = dst->src[3];
    const ggml_tensor * p = dst->src[4];

    GGML_ASSERT(x->type == GGML_TYPE_F32);
    GGML_ASSERT(g->type == GGML_TYPE_F32);
    GGML_ASSERT(gm->type == GGML_TYPE_F32);
    GGML_ASSERT(gv->type == GGML_TYPE_F32);
    GGML_ASSERT(p->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->buffer != nullptr);
    GGML_ASSERT(ggml_is_contiguous(x));
    GGML_ASSERT(ggml_is_contiguous(g));
    GGML_ASSERT(ggml_is_contiguous(gm));
    GGML_ASSERT(ggml_is_contiguous(gv));
    GGML_ASSERT(ggml_is_contiguous(p));
    GGML_ASSERT(ggml_are_same_shape(x, g));
    GGML_ASSERT(ggml_are_same_shape(x, gm));
    GGML_ASSERT(ggml_are_same_shape(x, gv));
    GGML_ASSERT(ggml_nelements(p) == 7);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, g, gm, gv, dst, GGML_OP_OPT_STEP_ADAMW);
    GGML_ASSERT(pipeline != nullptr);

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    vk_subbuffer x_buf = ggml_vk_tensor_subbuffer(ctx, x);
    vk_subbuffer g_buf = ggml_vk_tensor_subbuffer(ctx, g);
    vk_subbuffer gm_buf = ggml_vk_tensor_subbuffer(ctx, gm);
    vk_subbuffer gv_buf = ggml_vk_tensor_subbuffer(ctx, gv);
    vk_subbuffer p_buf = ggml_vk_tensor_subbuffer(ctx, p);

    std::array<uint32_t, 3> elements = { (uint32_t)ggml_nelements(x), 1, 1 };

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
        {x_buf, g_buf, gm_buf, gv_buf, p_buf},
        pc, elements);
}

static void ggml_vk_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    const size_t n = ggml_nelements(dst->src[0]);

    ggml_vk_op_f32_opt_step_adamw(
        ctx, subctx, dst,
        { (uint32_t)n, 0, 0.0f, 0.0f }
    );
}

static void ggml_vk_opt_step_sgd(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    const size_t n = ggml_nelements(dst->src[0]);

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_OPT_STEP_SGD, { (uint32_t)n, 0, 0.0f, 0.0f });
}

static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    int * op_params = (int *)dst->op_params;

    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONCAT, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, op_params[0],
    });
}

static void ggml_vk_upscale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t mode = (uint32_t)ggml_get_op_params_i32(dst, 0);

    GGML_TENSOR_UNARY_OP_LOCALS

    float sf0 = (float)ne0 / ne00;
    float sf1 = (float)ne1 / ne01;
    float sf2 = (float)ne2 / ne02;
    float sf3 = (float)ne3 / ne03;
    float pixel_offset = 0.5f;

    if (mode & GGML_SCALE_FLAG_ALIGN_CORNERS) {
        sf0 = ne0 > 1 && ne00 > 1 ? (float)(ne0 - 1) / (ne00 - 1) : sf0;
        sf1 = ne1 > 1 && ne01 > 1 ? (float)(ne1 - 1) / (ne01 - 1) : sf1;
        pixel_offset = 0.0f;
    }
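    // Illustrative: upscaling ne00 = 4 to ne0 = 8 uses sf0 = 2.0 with samples at
    // pixel centers (offset 0.5); with align-corners it becomes
    // sf0 = (8 - 1) / (4 - 1) ≈ 2.333 and offset 0, so the corner samples coincide.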
    ggml_vk_op_f32<vk_op_upscale_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UPSCALE, {
        (uint32_t)ggml_nelements(dst), 0, 0,
        (uint32_t)ne00, (uint32_t)ne01,
        (uint32_t)nb00 / src0_type_size, (uint32_t)nb01 / src0_type_size, (uint32_t)nb02 / src0_type_size, (uint32_t)nb03 / src0_type_size,
        (uint32_t)ne0, (uint32_t)ne1, (uint32_t)ne2, (uint32_t)ne3,
        sf0, sf1, sf2, sf3, pixel_offset
    });
}

static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    p.param1 = ggml_get_op_params_f32(dst, 0);
    p.param2 = ggml_get_op_params_f32(dst, 1);

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SCALE, std::move(p));
}

static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SQR, vk_op_unary_push_constants_init(src0, dst));
}

static void ggml_vk_sqrt(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst));
}

static void ggml_vk_add1(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_ADD1, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_arange(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_arange(dst=" << dst << ", ne=" << ggml_nelements(dst) << ")");
    vk_op_push_constants pc = {
        (uint32_t)ggml_nelements(dst),
        1,
        ggml_get_op_params_f32(dst, 0),
        ggml_get_op_params_f32(dst, 2),
    };

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, dst, GGML_OP_ARANGE);
    GGML_ASSERT(pipeline != nullptr);

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst, false);

    std::array<uint32_t, 3> elements = { (uint32_t)ggml_nelements(dst), 1, 1 };
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { dst_buf }, pc, elements);
}

static void ggml_vk_fill(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_fill(dst=" << dst << ", ne=" << ggml_nelements(dst) << ")");
    vk_op_push_constants pc = {
        (uint32_t)ggml_nelements(dst),
        1,
        ggml_get_op_params_f32(dst, 0),
        0.0f,
    };

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, dst, GGML_OP_FILL);
    GGML_ASSERT(pipeline != nullptr);

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst, false);

    std::array<uint32_t, 3> elements = { (uint32_t)ggml_nelements(dst), 1, 1 };
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { dst_buf }, pc, elements);
}

static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst));
}

static void ggml_vk_cos(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_COS, vk_op_unary_push_constants_init(src0, dst));
}

static void ggml_vk_log(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_LOG, vk_op_unary_push_constants_init(src0, dst));
}

static void ggml_vk_tri(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    p.param1 = ggml_get_op_params_f32(dst, 0);

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_TRI, std::move(p));
}

static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    p.param1 = ggml_get_op_params_f32(dst, 0);
    p.param2 = ggml_get_op_params_f32(dst, 1);

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_CLAMP, std::move(p));
}

static void ggml_vk_pad(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    vk_op_pad_push_constants p = vk_op_pad_push_constants_init(src0, dst);
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_PAD, std::move(p));
}

static void ggml_vk_roll(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const int32_t s0 = ggml_get_op_params_i32(dst, 0);
    const int32_t s1 = ggml_get_op_params_i32(dst, 1);
    const int32_t s2 = ggml_get_op_params_i32(dst, 2);
    const int32_t s3 = ggml_get_op_params_i32(dst, 3);
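    // Bias each shift by 0x8000 so signed values in [-32768, 32767] pack into an
    // unsigned 16-bit half-word; e.g. s0 = -1, s1 = 2 packs to 0x7FFF8002. The
    // consuming shader is expected to subtract the bias again after unpacking.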
    const uint32_t s01_packed = ((s0 + 0x8000) << 16) | (s1 + 0x8000);
    const uint32_t s23_packed = ((s2 + 0x8000) << 16) | (s3 + 0x8000);

    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst);
    memcpy(&p.param1, &s01_packed, sizeof(float));
    memcpy(&p.param2, &s23_packed, sizeof(float));

    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ROLL, std::move(p));
}

static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst, ggml_nelements(dst));
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_REPEAT, std::move(p));
}

static void ggml_vk_repeat_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst, ggml_nelements(dst));
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_REPEAT_BACK, std::move(p));
}

static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    uint32_t ne = (uint32_t)ggml_nelements(src0);
    if (ggml_is_quantized(src0->type) && ggml_is_quantized(dst->type)) {
        // Convert from number of logical elements to 2- or 4-byte units.
        ne /= ggml_blck_size(src0->type);
        if ((ggml_type_size(src0->type) % 4) == 0) {
            ne *= ggml_type_size(src0->type) / 4;
        } else {
            ne *= ggml_type_size(src0->type) / 2;
        }
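        // e.g. Q4_0 stores 32 elements in 18 bytes; since 18 % 4 != 0, each
        // block counts as 18 / 2 = 9 two-byte units.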
    }

    vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst, ne);
    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_CPY, std::move(p));
}

static void ggml_vk_set_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    // Skip empty set_rows operations. For most ops the empty check at the start
    // of ggml_vk_build_graph is sufficient, but set_rows can have a nonempty dst
    // with empty srcs.
    if (ggml_is_empty(src0) || ggml_is_empty(src1)) {
        return;
    }

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SET_ROWS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_silu_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SILU_BACK, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
}

static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const int * int_op_params = (const int *)dst->op_params;
    const float * float_op_params = (const float *)dst->op_params;

    const uint32_t num_groups = int_op_params[0];
    const float eps = float_op_params[1];
    const uint32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
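    // Channels are divided into groups with the count rounded up: e.g. ne2 = 10
    // channels and num_groups = 4 gives ceil(10 / 4) = 3 channels per group, so
    // each group normalizes ne0 * ne1 * 3 elements.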
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f });
}

static uint32_t ggml_vk_rms_num_partials(ggml_backend_vk_context * ctx, const ggml_tensor *node) {
    const uint32_t ne = (uint32_t)node->ne[0];
    const uint32_t denom = ctx->device->pipeline_add_rms[0][0][0]->wg_denoms[0];
    const uint32_t num_partials = CEIL_DIV(ne, denom);
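    // One partial sum per add_rms workgroup along the row; e.g. assuming a
    // (device-dependent) workgroup denominator of 128, a row of ne = 4096
    // yields CEIL_DIV(4096, 128) = 32 partials.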
    return num_partials;
}

static uint32_t ggml_vk_rms_partials_size(ggml_backend_vk_context * ctx, const ggml_tensor *node) {
    const uint32_t num_partials = ggml_vk_rms_num_partials(ctx, node);
    const uint32_t num_bytes = ROUNDUP_POW2(num_partials * sizeof(uint32_t), ctx->device->partials_binding_alignment);
    return num_bytes;
}

static vk_op_rope_push_constants ggml_vk_make_rope_constants(const ggml_tensor *dst, const ggml_tensor *src0, const bool has_ff, bool backprop, const uint32_t set_rows_stride) {
    const int n_dims = ((const int32_t *) dst->op_params)[1];
    const int mode = ((const int32_t *) dst->op_params)[2];
    // const int n_ctx = ((const int32_t *) dst->op_params)[3];
    const int n_ctx_orig = ((const int32_t *) dst->op_params)[4];

    const float freq_base = ((const float *) dst->op_params)[5];
    const float freq_scale = ((const float *) dst->op_params)[6];
    const float ext_factor = ((const float *) dst->op_params)[7];
    const float attn_factor = ((const float *) dst->op_params)[8];
    const float beta_fast = ((const float *) dst->op_params)[9];
    const float beta_slow = ((const float *) dst->op_params)[10];
    int sections[4] {};
    if (mode & GGML_ROPE_TYPE_MROPE) {
        memcpy(sections, (const int32_t *) dst->op_params + 11, sizeof(int)*4);
    }
    const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE;

    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);

    const float theta_scale = powf(freq_base, -2.0f/n_dims);
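    // Geometric frequency decay across dimension pairs, matching the usual RoPE
    // schedule theta_i = freq_base^(-2i/n_dims); e.g. n_dims = 128 and
    // freq_base = 10000 give theta_scale = 10000^(-2/128) ≈ 0.866 per pair.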
    uint32_t nb01 = src0->nb[1] / ggml_type_size(src0->type);
    uint32_t nb02 = src0->nb[2] / ggml_type_size(src0->type);

    vk_op_rope_push_constants rope {
        (uint32_t)mode, (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1],
        freq_base, ext_factor, attn_factor, {corr_dims[0], corr_dims[1]}, theta_scale,
        has_ff, (uint32_t)src0->ne[2], nb01, nb02,
        { sections[0], sections[1], sections[2], sections[3] }, is_imrope, backprop, set_rows_stride,
    };
    return rope;
}

static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const struct ggml_cgraph * cgraph, int node_idx, float * op_params) {
    ggml_tensor * dst;
    const ggml_tensor * src0;
    const ggml_tensor * src1;

    if (ctx->num_additional_fused_ops > 0) {
        // fused rms_norm + mul
        ggml_tensor *mul = cgraph->nodes[node_idx + 1];
        ggml_tensor *other_src = mul->src[0] == cgraph->nodes[node_idx + 0] ? mul->src[1] : mul->src[0];
        dst = mul;
        src0 = cgraph->nodes[node_idx]->src[0];
        src1 = other_src;
    } else {
        dst = cgraph->nodes[node_idx];
        src0 = src1 = dst->src[0];
    }

    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    uint32_t param3 = ctx->do_add_rms_partials ? ggml_vk_rms_num_partials(ctx, dst) : 0;

    vk_op_binary_push_constants bin {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], 0.0f, (int32_t)param3,
    };

    // more than one fused op means rms_norm+mul+rope
    if (ctx->num_additional_fused_ops > 1) {
        static constexpr uint32_t max_tensors = 7;
        const ggml_tensor *tensors[max_tensors] {};

        ggml_tensor *rms = cgraph->nodes[node_idx + 0];
        ggml_tensor *mul = cgraph->nodes[node_idx + 1];
        ggml_tensor *rope = cgraph->nodes[node_idx + 2];
        ggml_tensor *other_src = mul->src[0] == rms ? mul->src[1] : mul->src[0];

        bool do_set_rows = ctx->num_additional_fused_ops == 4;

        tensors[0] = rms->src[0];
        tensors[1] = other_src;
        tensors[2] = mul;
        tensors[3] = rope->src[1]; // pos
        tensors[4] = rope->src[2]; // ff
        tensors[5] = cgraph->nodes[node_idx + ctx->num_additional_fused_ops]; // dst
        tensors[6] = do_set_rows ? tensors[5]->src[1] : nullptr;

        const uint32_t set_rows_stride = do_set_rows ? tensors[5]->nb[1] / ggml_type_size(tensors[5]->type) : 0;

        vk_op_rms_norm_mul_rope_push_constants pc;
        pc.bin = bin;
        pc.rope = ggml_vk_make_rope_constants(rope, rope->src[0], tensors[4] != nullptr, false, set_rows_stride);

        vk_pipeline pipeline = tensors[5]->type == GGML_TYPE_F16 ? ctx->device->pipeline_rms_norm_mul_rope_f32_f16 : ctx->device->pipeline_rms_norm_mul_rope_f32_f32;

        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

        ggml_backend_vk_buffer_context * buf_ctx[max_tensors];
        vk_buffer buf[max_tensors];
        size_t offset[max_tensors];
        bool uma[max_tensors];

        for (uint32_t i = 0; i < max_tensors; ++i) {
            if (!tensors[i]) {
                // If any remaining descriptors are unused, just point them at src[0]
                buf[i] = buf[0];
                offset[i] = 0;
                continue;
            }
            buf_ctx[i] = (ggml_backend_vk_buffer_context *)tensors[i]->buffer->context;
            buf[i] = nullptr;
            offset[i] = 0;
            uma[i] = false;

            if (ctx->device->uma) {
                ggml_vk_host_get(ctx->device, tensors[i]->data, buf[i], offset[i]);
                uma[i] = buf[i] != nullptr;
            }
            if (!uma[i]) {
                buf[i] = buf_ctx[i]->dev_buffer;
                offset[i] = vk_tensor_offset(tensors[i]) + tensors[i]->view_offs;
            }
            GGML_ASSERT(buf[i] != nullptr);
        }

        std::array<uint32_t, 3> elements;
        elements = { (uint32_t)rms->src[0]->ne[1], (uint32_t)rms->src[0]->ne[2], (uint32_t)rms->src[0]->ne[3] };

        static_assert(max_tensors == 7);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
            {
                ggml_vk_subbuffer(ctx, buf[0], offset[0]),
                ggml_vk_subbuffer(ctx, buf[1], offset[1]),
                ggml_vk_subbuffer(ctx, buf[2], offset[2]),
                ggml_vk_subbuffer(ctx, buf[3], offset[3]),
                ggml_vk_subbuffer(ctx, buf[4], offset[4]),
                ggml_vk_subbuffer(ctx, buf[5], offset[5]),
                ggml_vk_subbuffer(ctx, buf[6], offset[6]),
            }, pc, elements);
    } else {
        ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_RMS_NORM, std::move(bin));
    }

    if (ctx->do_add_rms_partials_offset_calculation) {
        ctx->prealloc_size_add_rms_partials_offset += ggml_vk_rms_partials_size(ctx, src0);
        ctx->do_add_rms_partials = false;
        ctx->do_add_rms_partials_offset_calculation = false;
    }
}

static void ggml_vk_rms_norm_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_RMS_NORM_BACK, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_l2_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_L2_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
}

static void ggml_vk_glu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const float * op_params_f = (const float *)dst->op_params;

    const bool swapped = (bool)dst->op_params[1];
    const bool split = src1 != nullptr;
    const float alpha = op_params_f[2];
    const float limit = op_params_f[3];

    GGML_ASSERT(ggml_is_contiguous(src0));

    if (!split) {
        GGML_ASSERT(src0->ne[0] / 2 == dst->ne[0]);
    } else {
        GGML_ASSERT(src0->ne[0] == src1->ne[0]);
        GGML_ASSERT(src0->ne[0] == dst->ne[0]);
        GGML_ASSERT(src0->type == src1->type);
    }

    const uint32_t mode = split ? 2 : (swapped ? 1 : 0);

    ggml_vk_op_f32<vk_op_glu_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_GLU,
        {
            (uint32_t)ggml_nelements(dst),
            (uint32_t)src0->ne[0],
            (uint32_t)dst->ne[0],
            mode,
            alpha,
            limit
        });
}

static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    int32_t * op_params = (int32_t *)dst->op_params;
    ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] });
}

static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;

    float scale = op_params[0];
    float max_bias = op_params[1];

    const uint32_t ncols = (uint32_t)src0->ne[0];
    const uint32_t nrows_x = (uint32_t)ggml_nrows(src0);
    const uint32_t nrows_y = (uint32_t)src0->ne[1];

    const uint32_t ne12 = src1 ? (uint32_t)(src1->ne[2]) : 0u;
    const uint32_t ne13 = src1 ? (uint32_t)(src1->ne[3]) : 0u;
    const uint32_t nb11 = src1 ? (uint32_t)(src1->nb[1] / src1->nb[0]) : 0u;
    const uint32_t nb12 = src1 ? (uint32_t)(src1->nb[2] / src1->nb[0]) : 0u;
    const uint32_t nb13 = src1 ? (uint32_t)(src1->nb[3] / src1->nb[0]) : 0u;

    const uint32_t n_head_kv = src0->ne[2];
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
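    // ALiBi slope bases, computed from the head count rounded down to a power of
    // two: e.g. n_head_kv = 12 gives n_head_log2 = 8, and max_bias = 8.0 yields
    // m0 = 2^(-8/8) = 0.5 and m1 = 2^(-4/8) ≈ 0.707.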
    ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, src2, nullptr, dst, GGML_OP_SOFT_MAX, {
        ncols,
        src1 != nullptr ? nrows_y : (uint32_t)0,
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],
        ne12, ne13,
        nb11, nb12, nb13,
        scale, max_bias,
        m0, m1,
        n_head_log2,
        nrows_x,
        src2 != nullptr
    });
}

static void ggml_vk_soft_max_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SOFT_MAX_BACK, { (uint32_t)src0->ne[0], (uint32_t)ggml_nrows(src0), op_params[0], op_params[1] });
}

static void ggml_vk_topk_moe(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_cgraph * cgraph, int node_idx) {
    topk_moe_mode mode = ggml_vk_num_additional_ops_to_topk_moe_mode(ctx->num_additional_fused_ops);

    ggml_tensor * logits = cgraph->nodes[node_idx + 0]->src[0];
    ggml_tensor * weights = (mode == TOPK_MOE_EARLY_SOFTMAX_NORM) ? cgraph->nodes[node_idx + 9] :
                            (mode == TOPK_MOE_EARLY_SOFTMAX)      ? cgraph->nodes[node_idx + 4] :
                                                                    cgraph->nodes[node_idx + 5];
    ggml_tensor * ids = (mode == TOPK_MOE_LATE_SOFTMAX) ? cgraph->nodes[node_idx + 1] : cgraph->nodes[node_idx + 3];

    GGML_ASSERT(logits->type == GGML_TYPE_F32);
    GGML_ASSERT(weights->type == GGML_TYPE_F32);
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const int n_experts = logits->ne[0];
    const int n_rows = logits->ne[1];
    const int n_expert_used = weights->ne[1];

    GGML_ASSERT(ids->nb[1] / ggml_type_size(ids->type) == (size_t) n_experts);

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, cgraph->nodes[node_idx], GGML_OP_SOFT_MAX);

    ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);

    vk_subbuffer logits_buf = ggml_vk_tensor_subbuffer(ctx, logits);
    vk_subbuffer weights_buf = ggml_vk_tensor_subbuffer(ctx, weights);
    vk_subbuffer ids_buf = ggml_vk_tensor_subbuffer(ctx, ids);

    vk_op_topk_moe_push_constants pc {};
    pc.n_rows = n_rows;
    pc.n_expert_used = n_expert_used;
    if (mode == TOPK_MOE_EARLY_SOFTMAX_NORM) {
        ggml_tensor * clamp = cgraph->nodes[node_idx + 7];
        pc.clamp_min = ggml_get_op_params_f32(clamp, 0);
        pc.clamp_max = ggml_get_op_params_f32(clamp, 1);
    }

    GGML_ASSERT(n_expert_used <= n_experts);

    const uint32_t rows_per_block = 4;
    std::array<uint32_t, 3> elements = { CEIL_DIV(n_rows, rows_per_block), 1, 1 };

    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {logits_buf, weights_buf, ids_buf}, pc, elements);
}

static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_cgraph * cgraph, int node_idx, bool backprop) {
    ggml_tensor * dst = cgraph->nodes[node_idx];
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    const ggml_tensor * src2 = dst->src[2];
    const ggml_tensor * src3 = nullptr;

    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode = ((int32_t *) dst->op_params)[2];
    // const int n_ctx = ((int32_t *) dst->op_params)[3];
    const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
    const float freq_base = ((float *) dst->op_params)[5];
    const float beta_fast = ((float *) dst->op_params)[9];
    const float beta_slow = ((float *) dst->op_params)[10];
    int sections[4] {};
    if (mode & GGML_ROPE_TYPE_MROPE) {
        memcpy(sections, (int32_t *) dst->op_params + 11, sizeof(int)*4);
    }

    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);

    uint32_t set_rows_stride = 0;
    // Fused rope + view + set_rows passes the set_rows destination stride in set_rows_stride
    // and overrides the dst and sets src3=row_indices
    if (ctx->num_additional_fused_ops > 0) {
        set_rows_stride = cgraph->nodes[node_idx + 2]->nb[1] / ggml_type_size(cgraph->nodes[node_idx + 2]->type);
        src3 = cgraph->nodes[node_idx + 2]->src[1];
        dst = cgraph->nodes[node_idx + 2];
    }

    ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, src2, src3, dst, GGML_OP_ROPE,
        ggml_vk_make_rope_constants(cgraph->nodes[node_idx], src0, src2 != nullptr, backprop, set_rows_stride));
}

static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t * op_params = (const uint32_t *)dst->op_params;

    uint32_t ncols = src0->ne[0];
    uint32_t nrows = ggml_nrows(src0);

    uint32_t ncols_pad_log2 = (uint32_t)ceilf(log2f(float(ncols)));
    uint32_t ncolsp2 = 1 << ncols_pad_log2;
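    // The sort runs on a power-of-two extent, so each row is padded up:
    // e.g. ncols = 1000 gives ncols_pad_log2 = 10 and ncolsp2 = 1024.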

    vk_op_argsort_push_constants pc { ncols, ncolsp2, ncols_pad_log2, nrows, op_params[0], 0, 0, 0, 0, };

    // Pick the largest workgroup size <= ncolsp2
    uint32_t pipeline_idx = std::min(ncols_pad_log2, num_argsort_pipelines - 1);

    // Use the "small" argsort shader if the whole sort can be done by a single workgroup.
    bool use_small = ncols_pad_log2 <= ctx->device->max_workgroup_size_log2 &&
                     ctx->device->pipeline_argsort_f32[pipeline_idx] != nullptr;
    vk_pipeline pipeline = use_small ? ctx->device->pipeline_argsort_f32[pipeline_idx]
                                     : ctx->device->pipeline_argsort_large_f32[pipeline_idx];

    vk_subbuffer src0_buf = ggml_vk_tensor_subbuffer(ctx, src0);
    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst);
    vk_subbuffer subbuf1 = dst_buf;

    // Reserve space for ivec2 per element, with rows padded to a power of two
    if (!use_small) {
        const size_t x_sz = size_t{ncolsp2} * nrows * 2 * sizeof(int);
        if (ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
            ggml_vk_preallocate_buffers(ctx, subctx);
        }
        if (ctx->prealloc_x_need_sync) {
            ggml_vk_sync_buffers(ctx, subctx);
        }
        subbuf1 = { ctx->prealloc_x, 0, ctx->prealloc_x->size };
    }

    std::array<uint32_t, 3> elements;
    elements[0] = ncolsp2;
    elements[1] = std::min((uint32_t)ggml_nrows(src0), ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
    elements[2] = 1;

    // First dispatch initializes tmp_idx and does the first N passes where
    // there is only communication between threads in the same workgroup.
    {
        vk_op_argsort_push_constants pc2 = pc;
        pc2.outer_start = 0;
        pc2.outer_end = std::min(ncols_pad_log2, ctx->device->max_workgroup_size_log2);
        pc2.inner_start = 0;
        pc2.inner_end = 100;

        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, subbuf1, dst_buf }, pc2, elements);
    }

    if (!use_small) {
        ggml_vk_sync_buffers(ctx, subctx);
        // Loop over outer/inner passes, synchronizing between each pass.
        for (uint32_t outer = ctx->device->max_workgroup_size_log2; outer < ncols_pad_log2; ++outer) {
            for (uint32_t inner = 0; inner < outer + 1; ++inner) {
                vk_op_argsort_push_constants pc2 = pc;
                pc2.outer_start = outer;
                pc2.outer_end = outer + 1;
                pc2.inner_start = inner;
                pc2.inner_end = inner + 1;
                // When the inner idx is large enough, there's only communication
                // within a workgroup. So the remaining inner iterations can all
                // run in the same dispatch.
                if (outer - inner < pipeline_idx) {
                    pc2.inner_end = 100;
                    inner = outer;
                    pipeline = ctx->device->pipeline_argsort_large_f32[pipeline_idx];
                } else {
                    // Smaller workgroup empirically seems to perform better
                    pipeline = ctx->device->pipeline_argsort_large_f32[pipeline_idx - 2];
                }
                ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
                ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, subbuf1, dst_buf }, pc2, elements);
                ggml_vk_sync_buffers(ctx, subctx);
            }
        }
        ctx->prealloc_x_need_sync = true;
    }
}

static void ggml_vk_topk(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    uint32_t ncols = src0->ne[0];
    uint32_t nrows = ggml_nrows(src0);
    uint32_t k = dst->ne[0];

    vk_op_topk_push_constants pc { ncols, ncols, k, nrows, 0, 0 };

    // Reserve space for ivec2 per element, double buffered
    const size_t dbl_buf_size = size_t{ncols} * nrows * 2 * sizeof(int);
    const size_t x_sz = dbl_buf_size * 2;
    uint32_t dbl_buf_index = 0;

    if (ctx->prealloc_size_x < x_sz) {
        ctx->prealloc_size_x = x_sz;
        ggml_vk_preallocate_buffers(ctx, subctx);
    }
    if (ctx->prealloc_x_need_sync) {
        ggml_vk_sync_buffers(ctx, subctx);
    }

    std::array<uint32_t, 3> elements;
    elements[1] = std::min(nrows, ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
    elements[2] = 1;

    uint32_t num_elements = ncols;

    // Each iteration reduces a workgroup's worth of elements down to the K
    // largest elements. Repeat until we have the top K elements.
    // Need to do at least one iteration to write out the results.
    bool done_one_iter = false;
    while (num_elements > k || !done_one_iter) {
        done_one_iter = true;

        // Prefer going as small as num_topk_pipelines - 3 for perf reasons.
        // But if K is larger, then we need a larger workgroup
        uint32_t max_pipeline = num_topk_pipelines - 1;
        uint32_t preferred_pipeline = std::max(num_topk_pipelines - 3, (uint32_t)log2f(float(k)) + 2);
        max_pipeline = std::min(preferred_pipeline, max_pipeline);

        uint32_t min_pipeline = (uint32_t)log2f(float(k)) + 1;
        // require full subgroup
        min_pipeline = std::max(min_pipeline, ctx->device->subgroup_size_log2);

        uint32_t pipeline_idx = (uint32_t)ceilf(log2f(float(num_elements)));
        pipeline_idx = std::min(pipeline_idx, max_pipeline);
        pipeline_idx = std::max(pipeline_idx, min_pipeline);

        if (num_elements > (1u << pipeline_idx)) {
            // If we could finish on this loop iteration (i.e. a single workgroup)
            // then do so. It's better than the overhead of another pass.
            for (uint32_t i = pipeline_idx; i < num_topk_pipelines; ++i) {
                if (num_elements <= (1u << i)) {
                    pipeline_idx = i;
                    break;
                }
            }
        }

        vk_pipeline pipeline = ctx->device->pipeline_topk_f32[pipeline_idx];
        // If the device doesn't support a pipeline this large, use smaller
        while (!pipeline) {
            pipeline_idx--;
            GGML_ASSERT(pipeline_idx >= min_pipeline);
            pipeline = ctx->device->pipeline_topk_f32[pipeline_idx];
        }

        vk_op_topk_push_constants pc2 = pc;
        pc2.ncols_input = num_elements;

        // Number of elements remaining after this pass
        uint32_t num_dst_elements = (num_elements / pipeline->wg_denoms[0]) * k + std::min(k, num_elements % pipeline->wg_denoms[0]);
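        // e.g. reducing ncols = 4096 with k = 8 and workgroups covering 1024
        // elements (illustrative denominator): the first pass keeps 4 * 8 = 32
        // candidates, and the second pass fits them into one workgroup, leaving
        // exactly k results.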
  8825. vk_subbuffer src_buf;
  8826. vk_subbuffer dst_buf;
  8827. if (num_elements == ncols) {
  8828. pc2.first_pass = 1;
  8829. src_buf = ggml_vk_tensor_subbuffer(ctx, src0);
  8830. } else {
  8831. src_buf = { ctx->prealloc_x, dbl_buf_index * dbl_buf_size, dbl_buf_size };
  8832. }
  8833. if (num_dst_elements == k) {
  8834. pc2.last_pass = 1;
  8835. dst_buf = ggml_vk_tensor_subbuffer(ctx, dst);
  8836. } else {
  8837. dst_buf = { ctx->prealloc_x, (dbl_buf_index ^ 1) * dbl_buf_size, dbl_buf_size };
  8838. }
  8839. elements[0] = num_elements;
  8840. ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
  8841. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src_buf, dst_buf }, pc2, elements);
  8842. num_elements = num_dst_elements;
  8843. dbl_buf_index ^= 1;
  8844. if (num_elements > k) {
  8845. ggml_vk_sync_buffers(ctx, subctx);
  8846. }
  8847. }
  8848. ctx->prealloc_x_need_sync = true;
  8849. }
  8850. static void ggml_vk_sum(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  8851. vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, ggml_nelements(src0));
  8852. ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SUM, p);
  8853. }
  8854. static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  8855. vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
  8856. ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, p);
  8857. }
  8858. static void ggml_vk_mean(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  8859. vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
  8860. p.weight = 1.0f / (float)src0->ne[0];
  8861. ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_MEAN, p);
  8862. }
  8863. static void ggml_vk_cumsum(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  8864. vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
  8865. ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_CUMSUM, p);
  8866. }
  8867. static void ggml_vk_argmax(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  8868. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ARGMAX, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], 0.0f, 0.0f });
  8869. }

static void ggml_vk_count_equal(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_COUNT_EQUAL, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
}
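
// GGML_OP_SOLVE_TRI reuses the generic binary push-constant layout. As elsewhere in this
// file, the nb strides are divided by the type size so the shader works in element units;
// the trailing offset/scale fields of the struct are unused here and passed as zeros.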
static void ggml_vk_solve_tri(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size  = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_SOLVE_TRI, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] /  dst_type_size, (uint32_t) dst->nb[1] /  dst_type_size, (uint32_t) dst->nb[2] /  dst_type_size, (uint32_t) dst->nb[3] /  dst_type_size,
        0,
        0.0f, 0.0f, 0,
    });
}

static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const int32_t s0 = dst->op_params[0];
    const int32_t s1 = dst->op_params[1];
    const int32_t p0 = dst->op_params[2];
    const int32_t p1 = dst->op_params[3];
    const int32_t d0 = dst->op_params[4];
    const int32_t d1 = dst->op_params[5];

    const bool is_2D = dst->op_params[6] == 1;

    const uint32_t IC = src1->ne[is_2D ? 2 : 1];
    const uint32_t IH = is_2D ? src1->ne[1] : 1;
    const uint32_t IW = src1->ne[0];

    const uint32_t KH = is_2D ? src0->ne[1] : 1;
    const uint32_t KW = src0->ne[0];

    const uint32_t OH = is_2D ? dst->ne[2] : 1;
    const uint32_t OW = dst->ne[1];

    const uint32_t offset_delta = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
    const uint32_t batch_offset = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32

    const uint32_t pelements = OW * KW * KH;

    const ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    const vk_buffer d_buf = d_buf_ctx->dev_buffer;
    const vk::DeviceAddress dst_addr = d_buf->bda_addr + vk_tensor_offset(dst) + dst->view_offs;

    ggml_vk_op_f32<vk_op_im2col_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_IM2COL, {
        dst_addr,
        batch_offset, offset_delta,
        IC, IW, IH, OW, OH, KW, KH,
        pelements,
        IC * KH * KW,
        s0, s1, p0, p1, d0, d1,
    });
}
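
// The 3D variant carries precomputed index products (KH*KW, KD*KH*KW, OW*IC*KD*KH*KW, ...)
// in the push constants, presumably so the shader can decompose a flat output index with a
// few divisions instead of nested loops. Like ggml_vk_im2col, it writes the destination
// through a buffer device address rather than a plain descriptor binding.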
static void ggml_vk_im2col_3d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_TENSOR_BINARY_OP_LOCALS

    const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t s2 = ((const int32_t *)(dst->op_params))[2];
    const int32_t p0 = ((const int32_t *)(dst->op_params))[3];
    const int32_t p1 = ((const int32_t *)(dst->op_params))[4];
    const int32_t p2 = ((const int32_t *)(dst->op_params))[5];
    const int32_t d0 = ((const int32_t *)(dst->op_params))[6];
    const int32_t d1 = ((const int32_t *)(dst->op_params))[7];
    const int32_t d2 = ((const int32_t *)(dst->op_params))[8];
    const int32_t IC = ((const int32_t *)(dst->op_params))[9];

    const int64_t N  = ne13 / IC;
    const int64_t ID = ne12;
    const int64_t IH = ne11;
    const int64_t IW = ne10;

    const int64_t KD = ne02;
    const int64_t KH = ne01;
    const int64_t KW = ne00;

    const int64_t OD = ne3 / N;
    const int64_t OH = ne2;
    const int64_t OW = ne1;

    const ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    const vk_buffer d_buf = d_buf_ctx->dev_buffer;
    const vk::DeviceAddress dst_addr = d_buf->bda_addr + vk_tensor_offset(dst) + dst->view_offs;

    vk_op_im2col_3d_push_constants pc {};

    pc.dst_addr = dst_addr;
    pc.nb10 = nb10 / ggml_type_size(src1->type);
    pc.nb11 = nb11 / ggml_type_size(src1->type);
    pc.nb12 = nb12 / ggml_type_size(src1->type);
    pc.nb13 = nb13 / ggml_type_size(src1->type);
    pc.s0 = s0;
    pc.s1 = s1;
    pc.s2 = s2;
    pc.p0 = p0;
    pc.p1 = p1;
    pc.p2 = p2;
    pc.d0 = d0;
    pc.d1 = d1;
    pc.d2 = d2;
    pc.IW = IW;
    pc.IH = IH;
    pc.ID = ID;
    pc.IC = IC;
    pc.KW = KW;
    pc.OH = OH;
    pc.KD_KH_KW = KD*KH*KW;
    pc.KH_KW = KH*KW;
    pc.IC_KD_KH_KW = IC*KD*KH*KW;
    pc.N_OD_OH = N*OD*OH;
    pc.OD_OH = OD*OH;
    pc.OD_OH_OW_IC_KD_KH_KW = OD*OH*OW*IC*KD*KH*KW;
    pc.OH_OW_IC_KD_KH_KW = OH*OW*IC*KD*KH*KW;
    pc.OW_IC_KD_KH_KW = OW*IC*KD*KH*KW;

    ggml_vk_op_f32<vk_op_im2col_3d_push_constants>(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_IM2COL_3D, std::move(pc));
}
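
// dim and max_period come from op_params; nb1 is converted from bytes to elements so the
// shader can index rows of dst directly.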
static void ggml_vk_timestep_embedding(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t dim = dst->op_params[0];
    const uint32_t max_period = dst->op_params[1];
    const uint32_t nb1 = dst->nb[1] / ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_timestep_embedding_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_TIMESTEP_EMBEDDING, {
        nb1, dim, max_period,
    });
}

static void ggml_vk_conv_transpose_1d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    // src0: (K, Cout, Cin, 1) -- kernel
    // src1: (L, Cin, 1, 1) -- input
    // dst: (*, Cout, 1, 1)

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    const int32_t s0 = dst->op_params[0];

    vk_op_conv_transpose_1d_push_constants p{};
    p.Cout = static_cast<uint32_t>(ne01);
    p.Cin  = static_cast<uint32_t>(ne02);
    p.K    = static_cast<uint32_t>(ne00);
    p.L    = static_cast<uint32_t>(ne10);
    p.KL   = static_cast<uint32_t>(ne0);
    p.nb01 = static_cast<uint32_t>(nb01 / nb00);
    p.nb02 = static_cast<uint32_t>(nb02 / nb00);
    p.nb11 = static_cast<uint32_t>(nb11 / nb10);
    p.nb1  = static_cast<uint32_t>(nb1 / nb0);
    p.s0   = static_cast<uint32_t>(s0);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_TRANSPOSE_1D, std::move(p));
}

static void ggml_vk_pool_2d(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    uint32_t op = static_cast<uint32_t>(dst->op_params[0]);
    const int32_t k1 = dst->op_params[1];
    const int32_t k0 = dst->op_params[2];
    const int32_t s1 = dst->op_params[3];
    const int32_t s0 = dst->op_params[4];
    const int32_t p1 = dst->op_params[5];
    const int32_t p0 = dst->op_params[6];

    const uint32_t IH = src0->ne[1];
    const uint32_t IW = src0->ne[0];

    const uint32_t N = dst->ne[3];

    const uint32_t OC = dst->ne[2];
    const uint32_t OH = dst->ne[1];
    const uint32_t OW = dst->ne[0];

    const uint32_t parallel_elements = N * OC * OH * OW;

    ggml_vk_op_f32<vk_op_pool2d_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_POOL_2D, {
        IW, IH, OW, OH, OC,
        parallel_elements,
        op,
        k0, k1, s0, s1, p0, p1,
    });
}
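
// All nb strides below are divided by the element size of the corresponding tensor, so the
// shader sees strides in elements rather than bytes. The asserts tie the kernel layout to
// the activation shapes: Cout (ne03) must match the dst channels (ne2) and Cin (ne02) must
// match the input channels (ne12).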
static void ggml_vk_conv_2d(ggml_backend_vk_context * ctx, vk_context & subctx, const ggml_tensor * src0,
                            const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(nb00 == sizeof(float) || nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));
    GGML_ASSERT(nb0 == sizeof(float));

    vk_op_conv2d_push_constants p{};
    p.Cout = static_cast<uint32_t>(ne03);
    p.Cin  = static_cast<uint32_t>(ne02);
    p.N    = static_cast<uint32_t>(ne13);

    p.KW = static_cast<uint32_t>(ne00);
    p.KH = static_cast<uint32_t>(ne01);
    p.W  = static_cast<uint32_t>(ne10);
    p.H  = static_cast<uint32_t>(ne11);
    p.OW = static_cast<uint32_t>(ne0);
    p.OH = static_cast<uint32_t>(ne1);

    p.s0 = static_cast<uint32_t>(dst->op_params[0]);
    p.s1 = static_cast<uint32_t>(dst->op_params[1]);
    p.p0 = static_cast<uint32_t>(dst->op_params[2]);
    p.p1 = static_cast<uint32_t>(dst->op_params[3]);
    p.d0 = static_cast<uint32_t>(dst->op_params[4]);
    p.d1 = static_cast<uint32_t>(dst->op_params[5]);

    p.nb01 = static_cast<uint32_t>(nb01 / nb00);
    p.nb02 = static_cast<uint32_t>(nb02 / nb00);
    p.nb03 = static_cast<uint32_t>(nb03 / nb00);

    p.nb11 = static_cast<uint32_t>(nb11 / nb10);
    p.nb12 = static_cast<uint32_t>(nb12 / nb10);
    p.nb13 = static_cast<uint32_t>(nb13 / nb10);

    p.nb1 = static_cast<uint32_t>(nb1 / nb0);
    p.nb2 = static_cast<uint32_t>(nb2 / nb0);
    p.nb3 = static_cast<uint32_t>(nb3 / nb0);

    GGML_ASSERT(ne03 == ne2);
    GGML_ASSERT(ne02 == ne12);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_2D, std::move(p));
}

static void ggml_vk_conv_transpose_2d(ggml_backend_vk_context * ctx, vk_context & subctx, const ggml_tensor * src0,
                                      const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(nb00 == sizeof(float) || nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));
    GGML_ASSERT(nb0 == sizeof(float));

    vk_op_conv_transpose_2d_push_constants p{};
    p.Cout = static_cast<uint32_t>(ne02);
    p.Cin  = static_cast<uint32_t>(ne03);
    p.N    = static_cast<uint32_t>(ne13);

    p.KW = static_cast<uint32_t>(ne00);
    p.KH = static_cast<uint32_t>(ne01);
    p.W  = static_cast<uint32_t>(ne10);
    p.H  = static_cast<uint32_t>(ne11);
    p.OW = static_cast<uint32_t>(ne0);
    p.OH = static_cast<uint32_t>(ne1);

    p.s0 = static_cast<uint32_t>(dst->op_params[0]);
    p.s1 = static_cast<uint32_t>(dst->op_params[0]);
    p.p0 = 0;
    p.p1 = 0;
    p.d0 = 1;
    p.d1 = 1;

    p.nb01 = static_cast<uint32_t>(nb01 / nb00);
    p.nb02 = static_cast<uint32_t>(nb02 / nb00);
    p.nb03 = static_cast<uint32_t>(nb03 / nb00);

    p.nb11 = static_cast<uint32_t>(nb11 / nb10);
    p.nb12 = static_cast<uint32_t>(nb12 / nb10);
    p.nb13 = static_cast<uint32_t>(nb13 / nb10);

    p.nb1 = static_cast<uint32_t>(nb1 / nb0);
    p.nb2 = static_cast<uint32_t>(nb2 / nb0);
    p.nb3 = static_cast<uint32_t>(nb3 / nb0);

    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne12);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_TRANSPOSE_2D, std::move(p));
}
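
// Depthwise conv: one filter per channel. The asserts below require the kernel's ne[3] to
// match the channel count and the input's ne[3] to match the batch count taken from dst.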
static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    vk_op_conv2d_dw_push_constants p{};
    p.ne = ggml_nelements(dst);
    p.channels = dst->ne[2];
    p.batches = dst->ne[3];
    p.dst_w = dst->ne[0];
    p.dst_h = dst->ne[1];
    p.src_w = src1->ne[0];
    p.src_h = src1->ne[1];
    p.knl_w = src0->ne[0];
    p.knl_h = src0->ne[1];
    p.stride_x = dst->op_params[0];
    p.stride_y = dst->op_params[1];
    p.pad_x = dst->op_params[2];
    p.pad_y = dst->op_params[3];
    p.dilation_x = dst->op_params[4];
    p.dilation_y = dst->op_params[5];

    GGML_ASSERT(src0->ne[3] == p.channels);
    GGML_ASSERT(src1->ne[3] == p.batches);

    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, nullptr, dst, GGML_OP_CONV_2D_DW, std::move(p));
}

static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const float * op_params = (const float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f });
}

#ifdef GGML_VULKAN_RUN_TESTS
static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
    if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
                float val;
                if (type == GGML_TYPE_F32) {
                    val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
                } else if (type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
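
// Standalone matmul benchmark/correctness test: picks the S/M/L (and aligned/unaligned)
// shader variant for the requested types, runs num_it iterations timed around a fence
// wait, then validates against a CPU reference computed with ggml_mul_mat. Note the
// timing below includes command submission and fence overhead, not just shader execution.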
template <typename X_TYPE, typename Y_TYPE>
static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
    VK_LOG_DEBUG("ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_s;
            shname = "F32_ALIGNED_S";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_s;
            shname = "F32_F16_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_s;
            shname = "F16_F32_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16.f32acc->a_s;
            shname = "F16_ALIGNED_S";
        } else {
            GGML_ABORT("fatal error");
        }
    } else if (shader_size == 1) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_m;
            shname = "F32_ALIGNED_M";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_m;
            shname = "F32_F16_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_m;
            shname = "F16_F32_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16.f32acc->a_m;
            shname = "F16_ALIGNED_M";
        } else {
            GGML_ABORT("fatal error");
        }
    } else if (shader_size == 2) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_l;
            shname = "F32_ALIGNED_L";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_l;
            shname = "F32_F16_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32.f32acc->a_l;
            shname = "F16_F32_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16.f32acc->a_l;
            shname = "F16_ALIGNED_L";
        } else {
            GGML_ABORT("fatal error");
        }
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->s;
                shname = "F32_S";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->s;
                shname = "F32_F16_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32.f32acc->s;
                shname = "F16_F32_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16.f32acc->s;
                shname = "F16_S";
            }
        } else if (shader_size == 1) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->m;
                shname = "F32_M";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->m;
                shname = "F32_F16_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32.f32acc->m;
                shname = "F16_F32_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16.f32acc->m;
                shname = "F16_M";
            }
        } else if (shader_size == 2) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->l;
                shname = "F32_L";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->l;
                shname = "F32_F16_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32.f32acc->l;
                shname = "F16_F32_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16.f32acc->l;
                shname = "F16_L";
            }
        }
    }

    ggml_pipeline_request_descriptor_sets(ctx, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, {vk::MemoryPropertyFlagBits::eDeviceLocal});
        }
    }

    ggml_pipeline_allocate_descriptor_sets(ctx);

    vk_buffer d_X = ggml_vk_create_buffer_check(ctx->device, sizeof(X_TYPE) * x_ne, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer d_Y = ggml_vk_create_buffer_check(ctx->device, sizeof(Y_TYPE) * y_ne, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer d_D = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne, {vk::MemoryPropertyFlagBits::eDeviceLocal});

    X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
    Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
    float* d = (float *) malloc(sizeof(float) * d_ne);

    for (size_t i = 0; i < x_ne; i++) {
        if (std::is_same<float, X_TYPE>()) {
            x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            // x[i] = 1.0f;
            // x[i] = i + 1;
            // x[i] = (i % k == i / k) ? 1.0f : 0.0f;
        } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
            x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            // x[i] = ggml_fp32_to_fp16(1.0f);
            // x[i] = ggml_fp32_to_fp16(i + 1);
            // x[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
        } else {
            GGML_ABORT("fatal error");
        }
    }
    for (size_t i = 0; i < y_ne; i++) {
        if (std::is_same<float, Y_TYPE>()) {
            y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            // y[i] = (i % k == i / k) ? 1.0f : 0.0f;
            // y[i] = i + 1;
        } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
            y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            // y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
            // y[i] = ggml_fp32_to_fp16(i + 1);
        } else {
            GGML_ABORT("fatal error");
        }
    }

    ggml_vk_buffer_write(d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
    ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
    ggml_vk_ctx_begin(ctx->device, subctx);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(ctx, d_X), ggml_vk_subbuffer(ctx, d_Y), ggml_vk_subbuffer(ctx, d_D), ggml_vk_subbuffer(ctx, ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1, n
        );
    }
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
    ggml_vk_queue_command_pools_cleanup(ctx->device);

    auto end = std::chrono::high_resolution_clock::now();
    double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    // copy dst to host
    ggml_vk_buffer_read(d_D, 0, d, sizeof(float) * d_ne);

    float * d_chk = (float *) malloc(sizeof(float) * d_ne);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_type src0_type;
    ggml_type src1_type;

    if (std::is_same<float, X_TYPE>()) {
        src0_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
        src0_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }
    if (std::is_same<float, Y_TYPE>()) {
        src1_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
        src1_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = x;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;

        if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }

    avg_err /= m * n;

    double tflops = 2.0*m*n*k*batch*num_it / (time / 1000.0) / (1000.0*1000.0*1000.0*1000.0);

    std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1 || std::isnan(avg_err)) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    free(d_chk);

    ggml_vk_command_pool_cleanup(ctx->device, ctx->compute_cmd_pool);
    ggml_vk_command_pool_cleanup(ctx->device, ctx->transfer_cmd_pool);

    ggml_vk_destroy_buffer(d_X);
    ggml_vk_destroy_buffer(d_Y);
    ggml_vk_destroy_buffer(d_D);

    free(x);
    free(y);
    free(d);
}

static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
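
// Reference-data helpers for the tests below: data is quantized on the CPU with
// ggml_quantize_chunk, and the f32 reference is recovered with the type's to_float
// trait so GPU dequantization can be compared against it.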
static void ggml_vk_quantize_data(const float * from, void * to, size_t ne, ggml_type quant) {
    ggml_quantize_chunk(quant, from, to, 0, 1, ne, nullptr);
}

static void ggml_vk_dequantize_data(const void * from, float * to, size_t ne, ggml_type quant) {
    if (quant == GGML_TYPE_F32) {
        memcpy(to, from, sizeof(float) * ne);
        return;
    }

    const auto * tt = ggml_get_type_traits(quant);

    ggml_to_float_t dequant_fn = tt->to_float;

    dequant_fn(from, to, ne);
}

static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant(" << ne << ")");
    const size_t x_sz = sizeof(float) * ne;
    const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
    const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
    float * x = (float *) malloc(x_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz_f16, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    float * x_ref = (float *) malloc(x_sz);
    ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);

    for (size_t i = 0; i < ne; i++) {
        x[i] = rand() / (float)RAND_MAX;
    }

    vk_pipeline p = ggml_vk_get_to_fp16(ctx, quant);

    ggml_vk_quantize_data(x, qx, ne, quant);
    ggml_vk_dequantize_data(qx, x_ref, ne, quant);

    ggml_pipeline_request_descriptor_sets(ctx, p, 1);

    ggml_pipeline_allocate_descriptor_sets(ctx);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
    ggml_vk_ctx_begin(ctx->device, subctx);
    const std::vector<uint32_t> pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne };
    ggml_vk_dispatch_pipeline(ctx, subctx, p, { vk_subbuffer{ qx_buf, 0, qx_sz }, vk_subbuffer{ x_buf, 0, x_sz_f16 } }, pc, { (uint32_t)ne, 1, 1});
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
    ggml_vk_queue_command_pools_cleanup(ctx->device);

    auto end = std::chrono::high_resolution_clock::now();

    double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(x_buf, 0, x_chk, x_sz_f16);

    int first_err = -1;

    double avg_err = 0.0;
    for (size_t i = 0; i < ne; i++) {
        double error = std::fabs(x_ref[i] - ggml_fp16_to_fp32(x_chk[i]));
        avg_err += error;

        if (first_err < 0 && error > 0.05) {
            first_err = i;
        }
    }

    avg_err /= ne;

    std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "first_error = " << first_err << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << ggml_fp16_to_fp32(x_chk[i]) << ", ";
        }
        std::cerr << std::endl << "Expected result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << x_ref[i] << ", ";
        }
        std::cerr << std::endl;
    }

    ggml_vk_destroy_buffer(x_buf);
    ggml_vk_destroy_buffer(qx_buf);

    free(x);
    free(qx);
    free(x_ref);
    free(x_chk);
}

// This does not work without ggml q8_1 quantization support
//
// typedef uint16_t ggml_half;
// typedef uint32_t ggml_half2;
//
// #define QK8_1 32
// typedef struct {
//     union {
//         struct {
//             ggml_half d; // delta
//             ggml_half s; // d * sum(qs[i])
//         } GGML_COMMON_AGGR_S;
//         ggml_half2 ds;
//     } GGML_COMMON_AGGR_U;
//     int8_t qs[QK8_1]; // quants
// } block_q8_1;
//
// static void ggml_vk_test_quantize(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
//     VK_LOG_DEBUG("ggml_vk_test_quantize(" << ne << ")");
//     GGML_ASSERT(quant == GGML_TYPE_Q8_1);
//
//     const size_t x_sz = sizeof(float) * ne;
//     const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
//     float * x = (float *) malloc(x_sz);
//     block_q8_1 * qx = (block_q8_1 *)malloc(qx_sz);
//     block_q8_1 * qx_res = (block_q8_1 *)malloc(qx_sz);
//     vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
//     vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
//
//     for (size_t i = 0; i < ne; i++) {
//         x[i] = rand() / (float)RAND_MAX;
//     }
//
//     vk_pipeline p = ggml_vk_get_quantize_pipeline(ctx, quant);
//
//     ggml_pipeline_request_descriptor_sets(ctx, p, 1);
//
//     ggml_pipeline_allocate_descriptor_sets(ctx);
//
//     ggml_vk_buffer_write(x_buf, 0, x, x_sz);
//
//     vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
//     ggml_vk_ctx_begin(ctx->device, subctx);
//     ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, x_buf), ggml_vk_subbuffer(ctx, qx_buf), ne);
//     ggml_vk_ctx_end(subctx);
//
//     auto begin = std::chrono::high_resolution_clock::now();
//
//     ggml_vk_submit(subctx, ctx->fence);
//     VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_quantize waitForFences");
//     ctx->device->device.resetFences({ ctx->fence });
//     ggml_vk_queue_command_pools_cleanup(ctx->device);
//
//     auto end = std::chrono::high_resolution_clock::now();
//
//     double ms_quant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
//     ggml_vk_buffer_read(qx_buf, 0, qx, qx_sz);
//
//     ggml_vk_quantize_data(x, qx_res, ne, quant);
//
//     int first_err = -1;
//
//     for (size_t i = 0; i < ne / 32; i++) {
//         double error = std::fabs(ggml_fp16_to_fp32(qx_res[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d) - ggml_fp16_to_fp32(qx[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d));
//
//         if (first_err < 0 && error > 0.1) {
//             first_err = i;
//         }
//
//         error = std::fabs(ggml_fp16_to_fp32(qx_res[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s) - ggml_fp16_to_fp32(qx[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s));
//
//         if (first_err < 0 && error > 0.1) {
//             first_err = i;
//         }
//
//         for (size_t j = 0; j < 32; j++) {
//             uint64_t error = std::abs(qx_res[i].qs[j] - qx[i].qs[j]);
//
//             if (first_err < 0 && error > 1) {
//                 first_err = i;
//             }
//         }
//     }
//
//     std::cerr << "TEST QUANTIZE " << ggml_type_name(quant) << " time=" << ms_quant << "ms " << (first_err == -1 ? "CORRECT" : "INCORRECT") << std::endl;
//
//     if (first_err != -1) {
//         std::cerr << "first_error = " << first_err << std::endl;
//         std::cerr << "Actual result: " << std::endl << std::endl;
//         std::cout << "d=" << ggml_fp16_to_fp32(qx[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d) << " s=" << ggml_fp16_to_fp32(qx[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s) << " ";
//         for (size_t j = 0; j < 32; j++) {
//             std::cout << " qs" << j << "=" << (uint32_t)qx[first_err].qs[j] << " ";
//         }
//         std::cerr << std::endl << std::endl << "Expected result: " << std::endl << std::endl;
//         std::cout << "d=" << ggml_fp16_to_fp32(qx_res[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d) << " s=" << ggml_fp16_to_fp32(qx_res[first_err].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.s) << " ";
//         for (size_t j = 0; j < 32; j++) {
//             std::cout << " qs" << j << "=" << (uint32_t)qx_res[first_err].qs[j] << " ";
//         }
//         std::cerr << std::endl;
//     }
//
//     ggml_vk_destroy_buffer(x_buf);
//     ggml_vk_destroy_buffer(qx_buf);
//
//     free(x);
//     free(qx);
//     free(qx_res);
// }

static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, size_t split_k, size_t shader_size, ggml_type quant, bool mmq = false) {
    VK_LOG_DEBUG("ggml_vk_test_dequant_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << ggml_type_name(quant) << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_matmul_pipeline2 * pipelines;

    if (mmq) {
        pipelines = ctx->device->pipeline_dequant_mul_mat_mat_q8_1;
    } else {
        pipelines = ctx->device->pipeline_dequant_mul_mat_mat;
    }

    const bool fp16acc = ctx->device->fp16;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        p = fp16acc ? pipelines[quant].f16acc->a_s : pipelines[quant].f32acc->a_s;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S";
    } else if (shader_size == 1) {
        p = fp16acc ? pipelines[quant].f16acc->a_m : pipelines[quant].f32acc->a_m;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M";
    } else if (shader_size == 2) {
        p = fp16acc ? pipelines[quant].f16acc->a_l : pipelines[quant].f32acc->a_l;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L";
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = mmq ? 0 : ggml_vk_align_size(k, p->align);

    if (mmq || k != kpad) {
        if (shader_size == 0) {
            p = fp16acc ? pipelines[quant].f16acc->s : pipelines[quant].f32acc->s;
            shname = std::string(ggml_type_name(quant)) + "_S";
        } else if (shader_size == 1) {
            p = fp16acc ? pipelines[quant].f16acc->m : pipelines[quant].f32acc->m;
            shname = std::string(ggml_type_name(quant)) + "_M";
        } else if (shader_size == 2) {
            p = fp16acc ? pipelines[quant].f16acc->l : pipelines[quant].f32acc->l;
            shname = std::string(ggml_type_name(quant)) + "_L";
        } else {
            GGML_ASSERT(0);
        }
    }

    if (p == nullptr) {
        std::cerr << "error: no pipeline for ggml_vk_test_dequant_matmul " << ggml_type_name(quant) << std::endl;
        return;
    }

    const size_t x_sz = sizeof(float) * x_ne;
    const size_t y_sz = sizeof(float) * y_ne;
    const size_t qx_sz = x_ne * ggml_type_size(quant)/ggml_blck_size(quant);
    const size_t qy_sz = mmq ? y_ne * ggml_type_size(GGML_TYPE_Q8_1)/ggml_blck_size(GGML_TYPE_Q8_1) : y_sz;
    const size_t d_sz = sizeof(float) * d_ne;
    float * x = (float *) malloc(x_sz);
    float * y = (float *) malloc(y_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer y_buf = ggml_vk_create_buffer_check(ctx->device, y_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer qy_buf = ggml_vk_create_buffer_check(ctx->device, qy_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    vk_buffer d_buf = ggml_vk_create_buffer_check(ctx->device, d_sz, {vk::MemoryPropertyFlagBits::eDeviceLocal});
    float * d = (float *) malloc(d_sz);
    float * d_chk = (float *) malloc(d_sz);

    for (size_t i = 0; i < x_ne; i++) {
        x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        // x[i] = (i % k == i / k) ? 1.0f : 0.0f;
        // x[i] = i % k;
    }

    ggml_vk_quantize_data(x, qx, x_ne, quant);

    for (size_t i = 0; i < y_ne; i++) {
        y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        // y[i] = (i % k == i / k) ? 1.0f : 0.0f;
        // y[i] = i % k;
    }

    ggml_pipeline_request_descriptor_sets(ctx, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, {vk::MemoryPropertyFlagBits::eDeviceLocal});
        }
    }
    if (mmq) {
        ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_quantize_q8_1, num_it);
    }

    ggml_pipeline_allocate_descriptor_sets(ctx);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);
    ggml_vk_buffer_write(y_buf, 0, y, y_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
    ggml_vk_ctx_begin(ctx->device, subctx);
    if (mmq) {
        for (size_t i = 0; i < num_it; i++) {
            ggml_vk_quantize_q8_1(ctx, subctx, { y_buf, 0, y_sz }, { qy_buf, 0, qy_sz }, y_ne);
            ggml_vk_matmul(
                ctx, subctx, p, { qx_buf, 0, qx_sz }, { qy_buf, 0, qy_sz }, { d_buf, 0, d_sz }, { ctx->prealloc_split_k, 0, ctx->prealloc_size_split_k },
                m, n, k,
                k, k, m, k*m, k*n, m*n,
                split_k, batch, batch, batch, 1, 1, n
            );
        }
    } else {
        for (size_t i = 0; i < num_it; i++) {
            ggml_vk_matmul(
                ctx, subctx, p, { qx_buf, 0, qx_sz }, { y_buf, 0, y_sz }, { d_buf, 0, d_sz }, { ctx->prealloc_split_k, 0, ctx->prealloc_size_split_k },
                m, n, k,
                k, k, m, k*m, k*n, m*n,
                split_k, batch, batch, batch, 1, 1, n
            );
        }
    }
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
    ggml_vk_queue_command_pools_cleanup(ctx->device);

    auto end = std::chrono::high_resolution_clock::now();

    double time_ms = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(d_buf, 0, d, d_sz);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, quant, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, GGML_TYPE_F32, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = qx;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;

        if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }

    avg_err /= m * n;

    double tflops = 2.0*m*n*k*batch*num_it / (time_ms / 1000.0) / (1000.0*1000.0*1000.0*1000.0);

    std::cerr << "TEST dequant matmul " << shname;
    if (mmq) {
        std::cerr << " mmq";
    }
    std::cerr << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time_ms / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;

    if (avg_err > 0.01 || std::isnan(avg_err)) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        std::cerr << "src0: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(x, GGML_TYPE_F32, k, m, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        std::cerr << "src1: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(y, GGML_TYPE_F32, k, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    ggml_vk_destroy_buffer(qx_buf);
    ggml_vk_destroy_buffer(y_buf);
    ggml_vk_destroy_buffer(qy_buf);
    ggml_vk_destroy_buffer(d_buf);

    free(x);
    free(qx);
    free(y);
    free(d);
    free(d_chk);
}

#endif
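
// Preallocated scratch buffers only ever grow. A resize destroys the old buffer without
// preserving its contents, which is why any pending work is submitted and waited on first.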
static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx, vk_context subctx) {
#if defined(GGML_VULKAN_RUN_TESTS)
    const std::vector<size_t> vals {
        512, 512, 128,
        128, 512, 512,
        4096, 512, 4096,
        11008, 512, 4096,
        4096, 512, 11008,
        32000, 512, 4096,
        8, 8, 8,
        100, 46, 576,
        623, 111, 128,
        100, 46, 558,
        512, 1, 256,
        128, 110, 622,
        511, 511, 127,
        511, 511, 7,
        511, 511, 17,
        49, 49, 128,
        128, 49, 49,
        4096, 49, 4096,
    };
    const size_t num_it = 100;

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q4_0);

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q4_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q4_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q4_0, true);

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q8_0);

    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 0, GGML_TYPE_Q8_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 1, GGML_TYPE_Q8_0, true);
    ggml_vk_test_dequant_matmul(ctx, 4096, 512, 4096, 2, num_it, 1, 2, GGML_TYPE_Q8_0, true);

    abort();

    for (size_t i = 0; i < vals.size(); i += 3) {
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
        std::cerr << '\n';
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2);
        std::cerr << '\n';
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
        std::cerr << '\n' << std::endl;

        if (vals[i + 2] % 32 == 0) {
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_0);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_0);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_0);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_0);
            std::cerr << '\n' << std::endl;
        }

        if (vals[i + 2] % 256 == 0) {
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_K);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_K);
            std::cerr << '\n';
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_K);
            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_K);
            std::cerr << '\n' << std::endl;
        }
    }

    GGML_ABORT("fatal error");
#endif

    if (subctx) {
        // Submit and wait for any pending work before reallocating the buffers
        ggml_vk_ctx_end(subctx);
        ggml_vk_submit(subctx, {});
        ctx->submit_pending = true;
        ggml_vk_synchronize(ctx);
        ggml_vk_ctx_begin(ctx->device, subctx);
    }

    if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(x_size: " << ctx->prealloc_size_x << ")");
        // Resize buffer
        if (ctx->prealloc_x != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_x);
        }
        ctx->prealloc_x = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_x);
    }
    if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(y_size: " << ctx->prealloc_size_y << ")");
        // Resize buffer
        if (ctx->prealloc_y != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_y);
        }
        ctx->prealloc_y = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_y);
    }
    if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(split_k_size: " << ctx->prealloc_size_split_k << ")");
        // Resize buffer
        if (ctx->prealloc_split_k != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_split_k);
        }
        ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_split_k);
    }
    if (ctx->prealloc_add_rms_partials == nullptr || (ctx->prealloc_size_add_rms_partials > 0 && ctx->prealloc_add_rms_partials->size < ctx->prealloc_size_add_rms_partials)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(add_partials_size: " << ctx->prealloc_size_add_rms_partials << ")");
        // Resize buffer
        if (ctx->prealloc_add_rms_partials != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_add_rms_partials);
        }
        ctx->prealloc_add_rms_partials = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_add_rms_partials);
    }
}

static void ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_cgraph * cgraph, ggml_tensor* tensor, int tensor_idx, bool almost_ready);

// Returns true if node has enqueued work into the queue, false otherwise
// If submit is true, all operations queued so far are submitted to Vulkan to overlap cmdlist creation and GPU execution.
  9943. static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool last_node, bool almost_ready, bool submit){
  9944. ggml_tensor * node = cgraph->nodes[node_idx];
  9945. if (ggml_is_empty(node) || ggml_op_is_empty(node->op) || !node->buffer) {
  9946. return false;
  9947. }
  9948. VK_LOG_DEBUG("ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")");
  9949. ctx->semaphore_idx = 0;
  9950. ggml_tensor * src0 = node->src[0];
  9951. ggml_tensor * src1 = node->src[1];
  9952. ggml_tensor * src2 = node->src[2];
  9953. ggml_tensor * src3 = node->src[3];
  9954. if (node->op == GGML_OP_ADD) {
  9955. int next_node_idx = node_idx + 1 + ctx->num_additional_fused_ops;
  9956. if (next_node_idx < cgraph->n_nodes &&
  9957. cgraph->nodes[next_node_idx]->op == GGML_OP_RMS_NORM &&
  9958. cgraph->nodes[next_node_idx]->src[0] == cgraph->nodes[next_node_idx - 1] &&
  9959. ggml_nrows(cgraph->nodes[next_node_idx]) == 1 &&
  9960. ctx->device->add_rms_fusion) {
  9961. uint32_t size = ggml_vk_rms_partials_size(ctx, cgraph->nodes[node_idx]);
  9962. ctx->do_add_rms_partials_offset_calculation = true;
  9963. if (ctx->prealloc_size_add_rms_partials_offset + size <= ctx->prealloc_size_add_rms_partials) {
  9964. ctx->do_add_rms_partials = true;
  9965. }
  9966. }
  9967. }
    vk_context compute_ctx;

    if (ctx->compute_ctx.expired()) {
        compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
        ctx->compute_ctx = compute_ctx;
        ggml_vk_ctx_begin(ctx->device, compute_ctx);
    } else {
        compute_ctx = ctx->compute_ctx.lock();
    }

    {
        // This logic detects dependencies between nodes in the graph and calls ggml_vk_sync_buffers
        // to synchronize them. This handles most "normal" synchronization when computing the graph, and when
        // there is no auxiliary memory use, it shouldn't be necessary to call ggml_vk_sync_buffers
        // outside of this logic. When a node uses one of the prealloc buffers for something like
        // dequantization or split_k, additional synchronization is needed between those passes.
        bool need_sync = false;

        // Check whether "node" requires synchronization. The node requires synchronization if it
        // overlaps in memory with another unsynchronized node and at least one of them is a write.
        // Destination nodes are checked against both the written/read lists. Source nodes are only
        // checked against the written list. Two nodes overlap in memory if they come from the same
        // buffer and the tensor or view ranges overlap.
        auto const &overlaps_unsynced = [&](const ggml_tensor *node, const std::vector<const ggml_tensor *> &unsynced_nodes) -> bool {
            if (unsynced_nodes.size() == 0) {
                return false;
            }
            auto n_base = vk_tensor_offset(node) + node->view_offs;
            auto n_size = ggml_nbytes(node);
            ggml_backend_vk_buffer_context * a_buf_ctx = (ggml_backend_vk_buffer_context *)node->buffer->context;
            vk_buffer a_buf = a_buf_ctx->dev_buffer;
            for (auto &other : unsynced_nodes) {
                ggml_backend_vk_buffer_context * o_buf_ctx = (ggml_backend_vk_buffer_context *)other->buffer->context;
                vk_buffer o_buf = o_buf_ctx->dev_buffer;
                if (a_buf == o_buf) {
                    auto o_base = vk_tensor_offset(other) + other->view_offs;
                    auto o_size = ggml_nbytes(other);
                    if ((o_base <= n_base && n_base < o_base + o_size) ||
                        (n_base <= o_base && o_base < n_base + n_size)) {
                        return true;
                    }
                }
            }
            return false;
        };
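
        // Example: ranges [o_base, o_base + o_size) and [n_base, n_base + n_size)
        // overlap iff one range's base falls inside the other, which is exactly
        // the two-sided comparison above.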
        // For all fused ops, check if the destination node or any of the source
        // nodes require synchronization.
        for (int32_t i = 0; i < ctx->num_additional_fused_ops + 1 && !need_sync; ++i) {
            const ggml_tensor *cur_node = cgraph->nodes[node_idx + i];
            // If the node actually writes to memory, then check if it needs to sync
            if (ctx->fused_ops_write_mask & (1 << i)) {
                if (overlaps_unsynced(cur_node, ctx->unsynced_nodes_read) || overlaps_unsynced(cur_node, ctx->unsynced_nodes_written)) {
                    need_sync = true;
                    break;
                }
            }
            for (uint32_t j = 0; j < GGML_MAX_SRC; ++j) {
                if (!cur_node->src[j]) {
                    continue;
                }
                if (overlaps_unsynced(cur_node->src[j], ctx->unsynced_nodes_written)) {
                    need_sync = true;
                    break;
                }
            }
        }

#define ENABLE_SYNC_LOGGING 0

        if (need_sync) {
#if ENABLE_SYNC_LOGGING
            std::cerr << "sync" << std::endl;
#endif
            ctx->unsynced_nodes_written.clear();
            ctx->unsynced_nodes_read.clear();
            ggml_vk_sync_buffers(ctx, compute_ctx);
        }
        // Add all fused nodes to the unsynchronized lists.
        for (int32_t i = 0; i < ctx->num_additional_fused_ops + 1; ++i) {
            const ggml_tensor *cur_node = cgraph->nodes[node_idx + i];
            // Multiple outputs could be written, e.g. in topk_moe. Add them all to the list.
            if (ctx->fused_ops_write_mask & (1 << i)) {
                ctx->unsynced_nodes_written.push_back(cur_node);
            }
            for (uint32_t j = 0; j < GGML_MAX_SRC; ++j) {
                if (!cur_node->src[j]) {
                    continue;
                }
                ctx->unsynced_nodes_read.push_back(cur_node->src[j]);
            }
        }
    }

#if ENABLE_SYNC_LOGGING
    for (int i = 0; i < ctx->num_additional_fused_ops + 1; ++i) {
        auto *n = cgraph->nodes[node_idx + i];
        std::cerr << node_idx + i << " " << ggml_op_name(n->op) << " " << n->name;
        if (n->op == GGML_OP_GLU) {
            std::cerr << " " << ggml_glu_op_name(ggml_get_glu_op(n)) << " " << (n->src[1] ? "split" : "single") << " ";
        }
        if (n->op == GGML_OP_ROPE) {
            const int mode = ((const int32_t *) n->op_params)[2];
            std::cerr << " rope mode: " << mode;
        }
        std::cerr << std::endl;
    }
#endif
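
    // Record this node's commands into compute_ctx. Each op maps to its
    // ggml_vk_* recording function; ADD, SOFT_MAX and ARGSORT route to fused
    // variants when num_additional_fused_ops is nonzero.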
    switch (node->op) {
    case GGML_OP_REPEAT:
        ggml_vk_repeat(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_REPEAT_BACK:
        ggml_vk_repeat_back(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_ACC:
        ggml_vk_acc(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_GET_ROWS:
        ggml_vk_get_rows(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ADD:
        if (ctx->num_additional_fused_ops) {
            ggml_vk_multi_add(ctx, compute_ctx, cgraph, node_idx);
        } else {
            ggml_vk_add(ctx, compute_ctx, src0, src1, node);
        }
        break;
    case GGML_OP_SUB:
        ggml_vk_sub(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_MUL:
        ggml_vk_mul(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_DIV:
        ggml_vk_div(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ADD_ID:
        ggml_vk_add_id(ctx, compute_ctx, src0, src1, src2, node);
        break;
    case GGML_OP_CONCAT:
        ggml_vk_concat(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_UPSCALE:
        ggml_vk_upscale(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_ADD1:
        ggml_vk_add1(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ARANGE:
        ggml_vk_arange(ctx, compute_ctx, node);
        break;
    case GGML_OP_FILL:
        ggml_vk_fill(ctx, compute_ctx, node);
        break;
    case GGML_OP_SCALE:
        ggml_vk_scale(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SQR:
        ggml_vk_sqr(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SQRT:
        ggml_vk_sqrt(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SIN:
        ggml_vk_sin(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_COS:
        ggml_vk_cos(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_LOG:
        ggml_vk_log(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_TRI:
        ggml_vk_tri(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CLAMP:
        ggml_vk_clamp(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_PAD:
        ggml_vk_pad(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_ROLL:
        ggml_vk_roll(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        ggml_vk_cpy(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SET_ROWS:
        ggml_vk_set_rows(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_SILU_BACK:
        ggml_vk_silu_back(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_NORM:
        ggml_vk_norm(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_GROUP_NORM:
        ggml_vk_group_norm(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_RMS_NORM:
        ggml_vk_rms_norm(ctx, compute_ctx, cgraph, node_idx, (float *)node->op_params);
        break;
    case GGML_OP_RMS_NORM_BACK:
        ggml_vk_rms_norm_back(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_L2_NORM:
        ggml_vk_l2_norm(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_EXP:
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_ERF:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_NEG:
        case GGML_UNARY_OP_TANH:
        case GGML_UNARY_OP_SIGMOID:
        case GGML_UNARY_OP_HARDSIGMOID:
        case GGML_UNARY_OP_HARDSWISH:
        case GGML_UNARY_OP_ABS:
        case GGML_UNARY_OP_SOFTPLUS:
        case GGML_UNARY_OP_STEP:
        case GGML_UNARY_OP_ROUND:
        case GGML_UNARY_OP_CEIL:
        case GGML_UNARY_OP_FLOOR:
        case GGML_UNARY_OP_TRUNC:
            ggml_vk_unary(ctx, compute_ctx, src0, node);
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_GLU:
        switch (ggml_get_glu_op(node)) {
        case GGML_GLU_OP_GEGLU:
        case GGML_GLU_OP_REGLU:
        case GGML_GLU_OP_SWIGLU:
        case GGML_GLU_OP_SWIGLU_OAI:
        case GGML_GLU_OP_GEGLU_ERF:
        case GGML_GLU_OP_GEGLU_QUICK:
            ggml_vk_glu(ctx, compute_ctx, src0, src1, node);
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_DIAG_MASK_INF:
        ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SOFT_MAX:
        if (ctx->num_additional_fused_ops) {
            ggml_vk_topk_moe(ctx, compute_ctx, cgraph, node_idx);
        } else {
            ggml_vk_soft_max(ctx, compute_ctx, src0, src1, src2, node);
        }
        break;
    case GGML_OP_SOFT_MAX_BACK:
        ggml_vk_soft_max_back(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ROPE:
        ggml_vk_rope(ctx, compute_ctx, cgraph, node_idx, false);
        break;
    case GGML_OP_ROPE_BACK:
        ggml_vk_rope(ctx, compute_ctx, cgraph, node_idx, true);
        break;
    case GGML_OP_ARGSORT:
        if (ctx->num_additional_fused_ops) {
            ggml_vk_topk_moe(ctx, compute_ctx, cgraph, node_idx);
        } else {
            ggml_vk_argsort(ctx, compute_ctx, src0, node);
        }
        break;
    case GGML_OP_TOP_K:
        ggml_vk_topk(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SUM:
        ggml_vk_sum(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_SUM_ROWS:
        ggml_vk_sum_rows(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CUMSUM:
        ggml_vk_cumsum(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_MEAN:
        ggml_vk_mean(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_ARGMAX:
        ggml_vk_argmax(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_COUNT_EQUAL:
        ggml_vk_count_equal(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_SOLVE_TRI:
        ggml_vk_solve_tri(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_IM2COL:
        ggml_vk_im2col(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_IM2COL_3D:
        ggml_vk_im2col_3d(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        ggml_vk_timestep_embedding(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CONV_TRANSPOSE_1D:
        ggml_vk_conv_transpose_1d(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_POOL_2D:
        ggml_vk_pool_2d(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_CONV_2D:
        ggml_vk_conv_2d(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_CONV_TRANSPOSE_2D:
        ggml_vk_conv_transpose_2d(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_CONV_2D_DW:
        ggml_vk_conv_2d_dw(ctx, compute_ctx, src0, src1, node);
        break;
    case GGML_OP_LEAKY_RELU:
        ggml_vk_leaky_relu(ctx, compute_ctx, src0, node);
        break;
    case GGML_OP_MUL_MAT:
        ggml_vk_mul_mat(ctx, compute_ctx, cgraph, node_idx);
        break;
    case GGML_OP_MUL_MAT_ID:
        ggml_vk_mul_mat_id(ctx, compute_ctx, cgraph, node_idx);
        break;
    case GGML_OP_FLASH_ATTN_EXT:
        ggml_vk_flash_attn(ctx, compute_ctx, src0, src1, src2, src3, node->src[4], node);
        break;
    case GGML_OP_RWKV_WKV6:
        ggml_vk_rwkv_wkv6(ctx, compute_ctx, node);
        break;
    case GGML_OP_RWKV_WKV7:
        ggml_vk_rwkv_wkv7(ctx, compute_ctx, node);
        break;
    case GGML_OP_SSM_SCAN:
        ggml_vk_ssm_scan(ctx, compute_ctx, node);
        break;
    case GGML_OP_SSM_CONV:
        ggml_vk_ssm_conv(ctx, compute_ctx, node);
        break;
    case GGML_OP_OPT_STEP_ADAMW:
        ggml_vk_opt_step_adamw(ctx, compute_ctx, node);
        break;
    case GGML_OP_OPT_STEP_SGD:
        ggml_vk_opt_step_sgd(ctx, compute_ctx, src0, src1, src2, node);
        break;
    default:
        return false;
    }

    ctx->tensor_ctxs[node_idx] = compute_ctx;

#if defined(GGML_VULKAN_CHECK_RESULTS)
    // Force context reset on each node so that each tensor ends up in its own context
    // and can be run and compared to its CPU equivalent separately
    last_node = true;
#endif
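
    // Close and submit the accumulated command buffer either when the caller
    // requested a submit (to overlap recording with GPU execution) or at the
    // graph's last node.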
    if (submit || last_node) {
        ggml_vk_ctx_end(compute_ctx);

        // TODO probably it'd be better to pass an exit_node flag to ggml_vk_compute_forward
        if (last_node) {
            compute_ctx->exit_tensor_idx = node_idx_begin;
        } else {
            compute_ctx->exit_tensor_idx = -1;
        }

        ctx->compute_ctx.reset();

        ggml_vk_compute_forward(ctx, cgraph, node_begin, node_idx_begin, almost_ready);
    }
    return true;
}
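
// Submit the context recorded for tensor_idx: perform the pending host-side
// staging copies and memsets, then submit the command buffers to the queue.
// At the exit tensor, copy results back out and clear the staging lists.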
static void ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, ggml_tensor * tensor, int tensor_idx, bool almost_ready = false) {
    GGML_UNUSED(cgraph);
    GGML_UNUSED(tensor);

    VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");

    vk_context subctx = ctx->tensor_ctxs[tensor_idx].lock();

    // Only run if ctx hasn't been submitted yet
    if (!subctx->seqs.empty()) {
#ifdef GGML_VULKAN_CHECK_RESULTS
        ggml_vk_check_results_0(ctx, cgraph, tensor_idx);
#endif

        // Do staging buffer copies
        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        for (auto& mset : subctx->memsets) {
            memset(mset.dst, mset.val, mset.n);
        }

        if (almost_ready && !ctx->almost_ready_fence_pending) {
            ggml_vk_submit(subctx, ctx->almost_ready_fence);
            ctx->almost_ready_fence_pending = true;
        } else {
            ggml_vk_submit(subctx, {});
        }
        ctx->submit_pending = true;

#ifdef GGML_VULKAN_CHECK_RESULTS
        ggml_vk_synchronize(ctx);
        ggml_vk_check_results_1(ctx, cgraph, tensor_idx);
#endif
    }

    if (tensor_idx == subctx->exit_tensor_idx) {
        // Do staging buffer copies
        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        subctx->in_memcpys.clear();
        subctx->out_memcpys.clear();
        subctx->memsets.clear();
    }
}
// Clean up after graph processing is done
static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_graph_cleanup()");
    ctx->prealloc_y_last_pipeline_used = {};

    ctx->unsynced_nodes_written.clear();
    ctx->unsynced_nodes_read.clear();
    ctx->prealloc_x_need_sync = ctx->prealloc_y_need_sync = ctx->prealloc_split_k_need_sync = false;

    ggml_vk_command_pool_cleanup(ctx->device, ctx->compute_cmd_pool);
    ggml_vk_command_pool_cleanup(ctx->device, ctx->transfer_cmd_pool);

    for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s });
    }
    ctx->gc.semaphores.clear();

    for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
    }
    ctx->gc.tl_semaphores.clear();
    ctx->semaphore_idx = 0;

    ctx->event_idx = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.resetEvent(event);
    }

    ctx->tensor_ctxs.clear();
    ctx->gc.contexts.clear();
    ctx->pipeline_descriptor_set_requirements = 0;
    ctx->descriptor_set_idx = 0;
}

// Clean up on backend free
static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->name << ")");
    // discard any unsubmitted command buffers
    ctx->transfer_ctx.reset();
    // wait for any pending command buffers to finish
    ggml_vk_synchronize(ctx);
    ggml_vk_graph_cleanup(ctx);

    ggml_vk_destroy_buffer(ctx->prealloc_x);
    ggml_vk_destroy_buffer(ctx->prealloc_y);
    ggml_vk_destroy_buffer(ctx->prealloc_split_k);
    ggml_vk_destroy_buffer(ctx->prealloc_add_rms_partials);
    ggml_vk_destroy_buffer(ctx->sync_staging);
    ctx->prealloc_y_last_pipeline_used = nullptr;

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.destroyEvent(event);
    }
    ctx->gc.events.clear();

    ctx->device->device.destroyFence(ctx->fence);
    ctx->device->device.destroyFence(ctx->almost_ready_fence);

    for (auto& pool : ctx->descriptor_pools) {
        ctx->device->device.destroyDescriptorPool(pool);
    }
    ctx->descriptor_pools.clear();
    ctx->descriptor_sets.clear();

    ctx->compute_cmd_pool.destroy(ctx->device->device);
    ctx->transfer_cmd_pool.destroy(ctx->device->device);
}
static int ggml_vk_get_device_count() {
    ggml_vk_instance_init();

    return vk_instance.device_indices.size();
}

static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    vk::PhysicalDeviceProperties props;
    devices[device].getProperties(&props);

    snprintf(description, description_size, "%s", props.deviceName.data());
}

// backend interface

#define UNUSED GGML_UNUSED

// device backend

static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
    return buffer->buft->iface.get_name == ggml_backend_vk_buffer_type_name;
}

static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_free_buffer()");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    ggml_vk_destroy_buffer(ctx->dev_buffer);
    delete ctx;
}

static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
    return vk_ptr_base;

    UNUSED(buffer);
}

static enum ggml_status ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
    if (tensor->view_src != nullptr) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
    }
    return GGML_STATUS_SUCCESS;
}

static void ggml_backend_vk_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_memset_tensor(" << buffer << ", " << tensor << ", " << value << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    vk_buffer buf = buf_ctx->dev_buffer;
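    // Broadcast the byte into all four bytes of a word (value * 0x01010101),
    // since ggml_vk_buffer_memset takes a 32-bit fill value.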
    uint32_t val32 = (uint32_t)value * 0x01010101;
    ggml_vk_buffer_memset(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, val32, size);
}

static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_write(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_read(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
    if (ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;

        vk_buffer src_buf = src_buf_ctx->dev_buffer;
        vk_buffer dst_buf = dst_buf_ctx->dev_buffer;

        ggml_vk_buffer_copy(dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));

        return true;
    }
    return false;

    UNUSED(buffer);
}

static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    ggml_vk_buffer_memset(ctx->dev_buffer, 0, value, buffer->size);
}

static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
    /* .free_buffer   = */ ggml_backend_vk_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_vk_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_vk_buffer_init_tensor,
    /* .memset_tensor = */ ggml_backend_vk_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_vk_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_vk_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_vk_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_vk_buffer_clear,
    /* .reset         = */ NULL,
};

// vk buffer type
static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return ctx->name.c_str();
}

static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")");
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;

    vk_buffer dev_buffer = nullptr;
    try {
        dev_buffer = ggml_vk_create_buffer_device(ctx->device, size);
    } catch (const vk::SystemError& e) {
        return nullptr;
    }

    ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->device, std::move(dev_buffer), ctx->name);

    return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
}

static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->properties.limits.minStorageBufferOffsetAlignment;
}

static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->suballocation_block_size;
}

static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
    ggml_vk_instance_init();

    VK_LOG_DEBUG("ggml_backend_vk_buffer_type(" << dev_num << ")");

    vk_device dev = ggml_vk_get_device(dev_num);

    return &dev->buffer_type;
}

// host buffer type

static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_VK_NAME "_Host";

    UNUSED(buft);
}

static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
    return GGML_VK_NAME "_Host";

    UNUSED(buffer);
}

static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()");
    ggml_vk_host_free(vk_instance.devices[0], buffer->context);
}

static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")");

    size += 32; // Behave like the CPU buffer type

    void * ptr = nullptr;
    try {
        ptr = ggml_vk_host_malloc(vk_instance.devices[0], size);
    } catch (vk::SystemError& e) {
        GGML_LOG_WARN("ggml_vulkan: Failed to allocate pinned memory (%s)\n", e.what());
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;

    return buffer;

    UNUSED(buft);
}

static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return vk_instance.devices[0]->properties.limits.minMemoryMapAlignment;

    UNUSED(buft);
}

static size_t ggml_backend_vk_host_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    return vk_instance.devices[0]->suballocation_block_size;

    UNUSED(buft);
}

// Should be changed to return device-specific host buffer type
// but that probably requires changes in llama.cpp
ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name       = */ ggml_backend_vk_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_vk_host_buffer_type_get_alignment,
            /* .get_max_size   = */ ggml_backend_vk_host_buffer_type_get_max_size,
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .device   = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), 0),
        /* .context  = */ nullptr,
    };

    // Make sure device 0 is initialized
    ggml_vk_instance_init();
    ggml_vk_get_device(0);

    return &ggml_backend_vk_buffer_type_host;
}

// backend

static const char * ggml_backend_vk_name(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return ctx->name.c_str();
}

static void ggml_backend_vk_free(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    VK_LOG_DEBUG("ggml_backend_vk_free(" << ctx->name << ")");

    ggml_vk_cleanup(ctx);

    delete ctx;
    delete backend;
}

static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return &ctx->device->buffer_type;
}

static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_set_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_write_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_get_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = buf_ctx->dev_buffer;

    auto src_offset = vk_tensor_offset(tensor) + tensor->view_offs + offset;
    bool ret = ggml_vk_buffer_read_async(transfer_ctx, buf, src_offset, data, size);
    // If that failed, copy synchronously through a staging buffer
    if (!ret) {
        ggml_vk_ensure_sync_staging_buffer(ctx, size);
        ggml_vk_sync_buffers(nullptr, transfer_ctx);
        vk::BufferCopy buffer_cpy;
        buffer_cpy.srcOffset = src_offset;
        buffer_cpy.dstOffset = 0;
        buffer_cpy.size = size;

        transfer_ctx->s->buffer.copyBuffer(buf->buffer, ctx->sync_staging->buffer, { buffer_cpy });
        deferred_memcpy(data, ctx->sync_staging->ptr, size, &transfer_ctx->out_memcpys);
        ggml_vk_synchronize(ctx);
    }
}

static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if ((dst->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;

        vk_context transfer_ctx;

        if (ctx->transfer_ctx.expired()) {
            // Initialize new transfer context
            transfer_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
            ctx->transfer_ctx = transfer_ctx;
            ggml_vk_ctx_begin(ctx->device, transfer_ctx);
        } else {
            transfer_ctx = ctx->transfer_ctx.lock();
        }

        vk_buffer src_buf = src_buf_ctx->dev_buffer;
        vk_buffer dst_buf = dst_buf_ctx->dev_buffer;

        ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
        return true;
    }

    return false;
}
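
// Flush any open transfer context, then submit an empty batch that signals
// ctx->fence and wait on it, so that all submissions made while
// submit_pending was set are known to have completed.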
static void ggml_vk_synchronize(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_synchronize()");

    bool do_transfer = !ctx->transfer_ctx.expired();
    vk_context transfer_ctx;

    if (do_transfer) {
        transfer_ctx = ctx->transfer_ctx.lock();

        ggml_vk_ctx_end(transfer_ctx);

        for (auto& cpy : transfer_ctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(transfer_ctx, {});
        ctx->submit_pending = true;
    }

    if (ctx->submit_pending) {
        {
            std::lock_guard<std::mutex> guard(queue_mutex);
            ctx->device->compute_queue.queue.submit({}, ctx->fence);
        }
        ggml_vk_wait_for_fence(ctx);
        ctx->submit_pending = false;
    }

    if (do_transfer) {
        for (auto& cpy : transfer_ctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        ctx->transfer_ctx.reset();
    }
}

static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
    VK_LOG_DEBUG("ggml_backend_vk_synchronize()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    ggml_vk_synchronize(ctx);
    ggml_vk_graph_cleanup(ctx);
}

static bool ggml_vk_is_empty(ggml_tensor * node) {
    return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
}

static bool ggml_vk_can_fuse(const ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops) {
    if (!ggml_can_fuse(cgraph, node_idx, ops)) {
        return false;
    }

    if (ops.size() == 2 && ops.begin()[0] == GGML_OP_RMS_NORM && ops.begin()[1] == GGML_OP_MUL) {
        // additional constraints specific to this fusion
        const ggml_tensor *rms_norm = cgraph->nodes[node_idx];
        const ggml_tensor *mul      = cgraph->nodes[node_idx + 1];

        GGML_ASSERT(rms_norm->src[0]->type == GGML_TYPE_F32);
        GGML_ASSERT(rms_norm->type == GGML_TYPE_F32);
        // rms_norm only supports f32
        if (mul->src[0]->type != GGML_TYPE_F32 ||
            mul->src[1]->type != GGML_TYPE_F32 ||
            mul->type != GGML_TYPE_F32) {
            return false;
        }
        // if rms_norm is the B operand, then we don't handle broadcast
        if (rms_norm == mul->src[1] &&
            !ggml_are_same_shape(mul->src[0], rms_norm)) {
            return false;
        }
        // rms_norm shader assumes contiguous rows
        if (!ggml_is_contiguous_rows(mul->src[0]) || !ggml_is_contiguous_rows(mul->src[1])) {
            return false;
        }
    }

    auto const &mm_add_ok = [&](const ggml_tensor *mul, const ggml_tensor *add) {
        const ggml_tensor *bias = add->src[0] == mul ? add->src[1] : add->src[0];

        // mat-vec only
        if (ggml_nrows(mul) != 1) {
            return false;
        }
        // shaders assume the types match
        if (mul->type != bias->type) {
            return false;
        }
        // shaders reuse the D shape for bias
        if (!ggml_are_same_shape(mul, bias) ||
            !ggml_are_same_stride(mul, bias)) {
            return false;
        }
        // unaligned bias isn't handled
        if (get_misalign_bytes(ctx, bias) != 0) {
            return false;
        }
        return true;
    };

    if ((ops.size() == 2 || ops.size() == 3) && ops.begin()[0] == GGML_OP_MUL_MAT && ops.begin()[1] == GGML_OP_ADD) {
        // additional constraints specific to this fusion
        const ggml_tensor *mul = cgraph->nodes[node_idx];
        const ggml_tensor *add = cgraph->nodes[node_idx + 1];

        if (!mm_add_ok(mul, add)) {
            return false;
        }

        if (ops.size() == 3) {
            if (ops.begin()[2] != GGML_OP_ADD) {
                return false;
            }
            if (!mm_add_ok(add, cgraph->nodes[node_idx + 2])) {
                return false;
            }
        }
    }

    auto const &mmid_mul_ok = [&](const ggml_tensor *mmid, const ggml_tensor *mul) {
        const ggml_tensor *scale = mul->src[1];

        if (mmid != mul->src[0]) {
            return false;
        }
        // mat-vec only
        if (!ggml_vk_use_mul_mat_vec_id(cgraph, node_idx)) {
            return false;
        }
        // shaders assume the types match
        if (mmid->type != scale->type) {
            return false;
        }
        // shaders assume the bias is contiguous
        if (!ggml_is_contiguous(scale)) {
            return false;
        }
        // unaligned bias isn't handled
        if (get_misalign_bytes(ctx, scale) != 0) {
            return false;
        }
        // shader only indexes by expert index
        if (scale->ne[0] != 1 ||
            scale->ne[1] != mul->ne[1] ||
            scale->ne[2] != 1 ||
            scale->ne[3] != 1) {
            return false;
        }
        return true;
    };

    if ((ops.size() == 2 || ops.size() == 3) && ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_ADD_ID) {
        // additional constraints specific to this fusion
        const ggml_tensor *mul  = cgraph->nodes[node_idx];
        const ggml_tensor *add  = cgraph->nodes[node_idx + 1];
        const ggml_tensor *bias = add->src[1];

        if (mul != add->src[0]) {
            return false;
        }
        // mat-vec only
        if (!ggml_vk_use_mul_mat_vec_id(cgraph, node_idx)) {
            return false;
        }
        // shaders assume the types match
        if (mul->type != bias->type) {
            return false;
        }
        // shaders assume the bias is contiguous
        if (!ggml_is_contiguous(bias)) {
            return false;
        }
        // the ID tensor must be the same for mul_mat_id and add_id
        if (mul->src[2] != add->src[2]) {
            return false;
        }
        // unaligned bias isn't handled
        if (get_misalign_bytes(ctx, bias) != 0) {
            return false;
        }

        if (ops.size() == 3) {
            if (ops.begin()[2] != GGML_OP_MUL) {
                return false;
            }
            const ggml_tensor *mul = cgraph->nodes[node_idx + 2];
            return mmid_mul_ok(add, mul);
        }
    }

    if (ops.size() == 2 && ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_MUL) {
        // additional constraints specific to this fusion
        const ggml_tensor *mmid = cgraph->nodes[node_idx];
        const ggml_tensor *mul  = cgraph->nodes[node_idx + 1];

        if (!mmid_mul_ok(mmid, mul)) {
            return false;
        }
    }

    return true;
}
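
// Check the op-specific constraints for fusing a topk/moe node sequence
// starting at node_idx; which nodes hold the softmax and the final weights
// depends on the topk_moe_mode.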
static bool ggml_vk_can_fuse_topk_moe(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph,
                                      int node_idx, topk_moe_mode mode) {
    const ggml_tensor * softmax;
    const ggml_tensor * weights;

    switch (mode) {
    case TOPK_MOE_EARLY_SOFTMAX_NORM:
        softmax = cgraph->nodes[node_idx + 0];
        weights = cgraph->nodes[node_idx + 9];
        break;
    case TOPK_MOE_EARLY_SOFTMAX:
        softmax = cgraph->nodes[node_idx + 0];
        weights = cgraph->nodes[node_idx + 4];
        break;
    case TOPK_MOE_LATE_SOFTMAX:
        softmax = cgraph->nodes[node_idx + 4];
        weights = cgraph->nodes[node_idx + 5];
        break;
    default:
        return false;
    }

    const float * op_params = (const float *)softmax->op_params;

    float scale    = op_params[0];
    float max_bias = op_params[1];

    if (!ggml_is_contiguous(softmax->src[0]) || !ggml_is_contiguous(weights)) {
        return false;
    }

    if (scale != 1.0f || max_bias != 0.0f) {
        return false;
    }

    // don't fuse when masks or sinks are present
    if (softmax->src[1] || softmax->src[2]) {
        return false;
    }

    const int n_expert = softmax->ne[0];
    // n_expert must be a power of 2
    if (!is_pow2(n_expert) || n_expert > (1 << (num_topk_moe_pipelines - 1))) {
        return false;
    }

    if (!ctx->device->subgroup_arithmetic ||
        !ctx->device->subgroup_shuffle ||
        !ctx->device->subgroup_require_full_support ||
        ctx->device->disable_fusion) {
        return false;
    }

    return true;
}
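
// Check whether a ROPE -> VIEW -> SET_ROWS chain starting at node_idx can use
// the fused rope_set_rows path.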
static bool ggml_vk_can_fuse_rope_set_rows(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph,
                                           int node_idx) {
    GGML_UNUSED(ctx);
    const ggml_tensor *rope     = cgraph->nodes[node_idx + 0];
    const ggml_tensor *view     = cgraph->nodes[node_idx + 1];
    const ggml_tensor *set_rows = cgraph->nodes[node_idx + 2];

    // ne3 not tested
    if (rope->src[0]->ne[3] != 1) {
        return false;
    }

    if (set_rows->type != GGML_TYPE_F32 && set_rows->type != GGML_TYPE_F16) {
        return false;
    }

    if (set_rows->src[1]->type != GGML_TYPE_I64) {
        return false;
    }

    // The view should flatten two dims of rope into one dim
    if (!ggml_is_contiguous(view) ||
        view->ne[0] != rope->ne[0] * rope->ne[1]) {
        return false;
    }

    // Only norm/neox shaders have the fusion code
    const int mode = ((const int32_t *) rope->op_params)[2];
    if (mode != GGML_ROPE_TYPE_NORMAL && mode != GGML_ROPE_TYPE_NEOX) {
        return false;
    }

    return true;
}

// Check whether the tensors overlap in memory but are not equal.
// Fusions can potentially overwrite src tensors in ways that are not prevented
// by ggml-alloc. If the fusion is entirely elementwise, then it's OK for them
// to overlap if they are exactly equal.
// XXX TODO this check is probably missing from several fusion optimizations.
static bool ggml_vk_tensors_overlap_but_not_equal(const ggml_tensor * a, const ggml_tensor * b) {
    ggml_backend_vk_buffer_context * a_buf_ctx = (ggml_backend_vk_buffer_context *)a->buffer->context;
    vk_buffer a_buf = a_buf_ctx->dev_buffer;
    ggml_backend_vk_buffer_context * b_buf_ctx = (ggml_backend_vk_buffer_context *)b->buffer->context;
    vk_buffer b_buf = b_buf_ctx->dev_buffer;
    if (a_buf == b_buf) {
        auto a_base = vk_tensor_offset(a) + a->view_offs;
        auto a_size = ggml_nbytes(a);
        auto b_base = vk_tensor_offset(b) + b->view_offs;
        auto b_size = ggml_nbytes(b);

        if (a_base == b_base && a_size == b_size) {
            return false;
        }
        if ((b_base <= a_base && a_base < b_base + b_size) ||
            (a_base <= b_base && b_base < a_base + a_size)) {
            return true;
        }
    }
    return false;
}

static bool ggml_vk_can_fuse_rms_norm_mul_rope(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph,
                                               int node_idx) {
    GGML_UNUSED(ctx);
    const ggml_tensor *rms  = cgraph->nodes[node_idx + 0];
    const ggml_tensor *mul  = cgraph->nodes[node_idx + 1];
    const ggml_tensor *rope = cgraph->nodes[node_idx + 2];

    const int mode = ((const int32_t *) rope->op_params)[2];

    // noncontig tensors aren't tested, and don't seem common in practice
    if (!ggml_is_contiguous(rms) ||
        !ggml_is_contiguous(mul) ||
        !ggml_is_contiguous(rope)) {
        return false;
    }

    // only norm/neox are handled in the shader
    if (mode != GGML_ROPE_TYPE_NEOX && mode != GGML_ROPE_TYPE_NORMAL) {
        return false;
    }

    // shared memory size for passing data from mul->rope
    if (mul->ne[0] > 1024) {
        return false;
    }

    // must not overwrite srcs in a way that's not elementwise
    ggml_tensor *other_src = mul->src[0] == rms ? mul->src[1] : mul->src[0];
    if (ggml_vk_tensors_overlap_but_not_equal(rms->src[0], rope) ||
        ggml_vk_tensors_overlap_but_not_equal(other_src, rope)) {
        return false;
    }

    // conditions for pipeline creation
    if (!(ctx->device->float_controls_rte_fp16 &&
          sizeof(vk_op_rms_norm_mul_rope_push_constants) <= ctx->device->properties.limits.maxPushConstantsSize)) {
        return false;
    }

    return true;
}
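
// Count how many consecutive GGML_OP_ADD nodes starting at node_idx can be
// fused into one multi_add dispatch; returns 0 when no multi-add fusion applies.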
static uint32_t ggml_vk_fuse_multi_add(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph, int node_idx) {
    const ggml_tensor *first_node = cgraph->nodes[node_idx];
    if (first_node->op != GGML_OP_ADD) {
        return 0;
    }

    if (!ctx->device->multi_add) {
        return 0;
    }

    int32_t num_adds = 1;
    while (node_idx + num_adds < cgraph->n_nodes &&
           cgraph->nodes[node_idx + num_adds]->op == GGML_OP_ADD &&
           num_adds < MAX_FUSED_ADDS) {
        num_adds++;
    }

    // The shader currently requires same shapes (but different strides are allowed),
    // everything f32, and no misalignment
    for (int32_t i = 0; i < num_adds; ++i) {
        const ggml_tensor *next_node = cgraph->nodes[node_idx + i];
        if (!ggml_are_same_shape(first_node, next_node->src[0]) ||
            !ggml_are_same_shape(first_node, next_node->src[1]) ||
            next_node->type != GGML_TYPE_F32 ||
            next_node->src[0]->type != GGML_TYPE_F32 ||
            next_node->src[1]->type != GGML_TYPE_F32 ||
            get_misalign_bytes(ctx, next_node) ||
            get_misalign_bytes(ctx, next_node->src[0]) ||
            get_misalign_bytes(ctx, next_node->src[1])) {
            num_adds = i;
        }
    }

    // Verify we can fuse these
    ggml_op adds[MAX_FUSED_ADDS];
    for (int32_t i = 0; i < num_adds; ++i) {
        adds[i] = GGML_OP_ADD;
    }

    // decrease num_adds if they can't all be fused
    while (num_adds > 1 && !ggml_can_fuse(cgraph, node_idx, adds, num_adds)) {
        num_adds--;
    }

    // a single add is not "fused", so just return zero
    if (num_adds == 1) {
        return 0;
    }

    return num_adds;
}

static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    if (vk_instance.debug_utils_support) {
        vk::DebugUtilsLabelEXT dul = {};
        dul.pLabelName = "ggml_backend_vk_graph_compute";
        dul.color = std::array<float, 4>{1.0f, 1.0f, 1.0f, 1.0f};
        vk_instance.pfn_vkQueueBeginDebugUtilsLabelEXT(ctx->device->compute_queue.queue, reinterpret_cast<VkDebugUtilsLabelEXT*>(&dul));
    }

    ctx->prealloc_size_add_rms_partials_offset = 0;
    ctx->do_add_rms_partials = false;
    ctx->do_add_rms_partials_offset_calculation = false;

    int last_node = cgraph->n_nodes - 1;

    // If the last op in the cgraph isn't backend GPU, the command buffer doesn't get closed properly
    while (last_node > 0 && ggml_vk_is_empty(cgraph->nodes[last_node])) {
        last_node -= 1;
    }

    // Reserve tensor context space for all nodes
    ctx->tensor_ctxs.resize(cgraph->n_nodes);

    bool first_node_in_batch = true; // true if next node will be first node in a batch
    int submit_node_idx = 0; // index to first node in a batch

    vk_context compute_ctx;

    if (vk_perf_logger_enabled) {
        // allocate/resize the query pool
        if (ctx->device->num_queries < cgraph->n_nodes + 1) {
            if (ctx->device->query_pool) {
                ctx->device->device.destroyQueryPool(ctx->device->query_pool);
            }
            vk::QueryPoolCreateInfo query_create_info;
            query_create_info.queryType = vk::QueryType::eTimestamp;
            query_create_info.queryCount = cgraph->n_nodes + 100;
            ctx->device->query_pool = ctx->device->device.createQueryPool(query_create_info);
            ctx->device->num_queries = query_create_info.queryCount;
        }

        ctx->device->device.resetQueryPool(ctx->device->query_pool, 0, cgraph->n_nodes + 1);

        GGML_ASSERT(ctx->compute_ctx.expired());
        compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
        ctx->compute_ctx = compute_ctx;
        ggml_vk_ctx_begin(ctx->device, compute_ctx);
        compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, 0);
    }

    ctx->prealloc_y_last_pipeline_used = nullptr;
    ctx->prealloc_y_last_tensor_used = nullptr;

    if (ctx->prealloc_size_add_rms_partials) {
        ggml_vk_preallocate_buffers(ctx, nullptr);

        if (ctx->compute_ctx.expired()) {
            compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
            ctx->compute_ctx = compute_ctx;
            ggml_vk_ctx_begin(ctx->device, compute_ctx);
        } else {
            compute_ctx = ctx->compute_ctx.lock();
        }

        // initialize partial sums to zero.
        ggml_vk_buffer_memset_async(compute_ctx, ctx->prealloc_add_rms_partials, 0, 0, ctx->prealloc_size_add_rms_partials);
        ggml_vk_sync_buffers(ctx, compute_ctx);
    }

    // Submit after enough work has accumulated, to overlap CPU cmdbuffer generation with GPU execution.
    // Estimate the amount of matmul work by looking at the weight matrix size, and submit every 100MB
    // (and scaled down based on model size, so smaller models submit earlier).
    // Also submit at least every 100 nodes, in case there are workloads without as much matmul.
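    // For example, if the previous graph read 2 GB of mul_mat weights, the
    // initial threshold is min(100 MB, 2 GB / 40) = 50 MB, and it doubles after
    // each of the first three submits.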
  11107. int nodes_per_submit = 100;
  11108. int submitted_nodes = 0;
  11109. int submit_count = 0;
  11110. uint64_t mul_mat_bytes = 0;
  11111. uint64_t total_mul_mat_bytes = 0;
  11112. uint64_t mul_mat_bytes_per_submit = std::min(uint64_t(100*1000*1000), ctx->last_total_mul_mat_bytes / 40u);
  11113. for (int i = 0; i < cgraph->n_nodes; i++) {
  11114. if (first_node_in_batch) {
  11115. submit_node_idx = i;
  11116. }
  11117. if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) {
  11118. auto bytes = ggml_nbytes(cgraph->nodes[i]->src[0]);
  11119. mul_mat_bytes += bytes;
  11120. total_mul_mat_bytes += bytes;
  11121. }
  11122. if (!ctx->device->disable_fusion) {
  11123. uint32_t num_adds = ggml_vk_fuse_multi_add(ctx, cgraph, i);
  11124. if (num_adds) {
  11125. ctx->num_additional_fused_ops = num_adds - 1;
  11126. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT, GGML_OP_ADD, GGML_OP_ADD })) {
  11127. ctx->num_additional_fused_ops = 2;
  11128. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT, GGML_OP_ADD })) {
  11129. ctx->num_additional_fused_ops = 1;
  11130. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID, GGML_OP_MUL })) {
  11131. ctx->num_additional_fused_ops = 2;
  11132. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID })) {
  11133. ctx->num_additional_fused_ops = 1;
  11134. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_MUL })) {
  11135. ctx->num_additional_fused_ops = 1;
  11136. } else if (ggml_can_fuse_subgraph(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL, GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }, { i + 4 }) &&
  11137. ggml_check_edges(cgraph, i, rms_norm_mul_rope_view_set_rows_edges) &&
  11138. ggml_vk_can_fuse_rms_norm_mul_rope(ctx, cgraph, i) &&
  11139. ggml_vk_can_fuse_rope_set_rows(ctx, cgraph, i + 2)) {
  11140. ctx->num_additional_fused_ops = 4;
  11141. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL, GGML_OP_ROPE })&&
  11142. ggml_vk_can_fuse_rms_norm_mul_rope(ctx, cgraph, i)) {
  11143. ctx->num_additional_fused_ops = 2;
  11144. } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) {
  11145. ctx->num_additional_fused_ops = 1;
  11146. } else if (ggml_can_fuse_subgraph(cgraph, i, { GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }, { i + 2 }) &&
  11147. ggml_check_edges(cgraph, i, rope_view_set_rows_edges) &&
  11148. ggml_vk_can_fuse_rope_set_rows(ctx, cgraph, i)) {
  11149. ctx->num_additional_fused_ops = 2;
  11150. } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_early_softmax_norm, { i + 3, i + 9 }) &&
  11151. ggml_check_edges(cgraph, i, topk_moe_early_softmax_norm_edges) &&
  11152. ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_EARLY_SOFTMAX_NORM)) {
  11153. ctx->num_additional_fused_ops = topk_moe_early_softmax_norm.size() - 1;
  11154. // view of argsort writes to memory
  11155. ctx->fused_ops_write_mask |= 1 << 3;
  11156. } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_early_softmax, { i + 3, i + 4 }) &&
  11157. ggml_check_edges(cgraph, i, topk_moe_early_softmax_edges) &&
  11158. ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_EARLY_SOFTMAX)) {
  11159. ctx->num_additional_fused_ops = topk_moe_early_softmax.size() - 1;
  11160. // view of argsort writes to memory
  11161. ctx->fused_ops_write_mask |= 1 << 3;
  11162. } else if (ggml_can_fuse_subgraph(cgraph, i, topk_moe_late_softmax, { i + 1, i + 5 }) &&
  11163. ggml_check_edges(cgraph, i, topk_moe_late_softmax_edges) &&
  11164. ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, TOPK_MOE_LATE_SOFTMAX)) {
  11165. ctx->num_additional_fused_ops = topk_moe_late_softmax.size() - 1;
  11166. // view of argsort writes to memory
  11167. ctx->fused_ops_write_mask |= 1 << 1;
  11168. }
  11169. }
  11170. ctx->fused_ops_write_mask |= 1 << ctx->num_additional_fused_ops;
  11171. // Signal the almost_ready fence when the graph is mostly complete (< 20% remaining)
  11172. bool almost_ready = (cgraph->n_nodes - i) < cgraph->n_nodes / 5;
        bool submit = (submitted_nodes >= nodes_per_submit) ||
                      (mul_mat_bytes_per_submit != 0 && mul_mat_bytes >= mul_mat_bytes_per_submit) ||
                      (i + ctx->num_additional_fused_ops >= last_node) ||
                      (almost_ready && !ctx->almost_ready_fence_pending);

        bool enqueued = ggml_vk_build_graph(ctx, cgraph, i, cgraph->nodes[submit_node_idx], submit_node_idx, i + ctx->num_additional_fused_ops >= last_node, almost_ready, submit);

        if (vk_perf_logger_enabled) {
            if (ctx->compute_ctx.expired()) {
                compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool);
                ctx->compute_ctx = compute_ctx;
                ggml_vk_ctx_begin(ctx->device, compute_ctx);
            } else {
                compute_ctx = ctx->compute_ctx.lock();
            }
            // If there are fused ops, just write out timestamps for all nodes to keep the accounting simple
            for (int j = 0; j < ctx->num_additional_fused_ops + 1; ++j) {
                compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, i+j+1);
            }
        }

        if (enqueued) {
            ++submitted_nodes;

#ifndef GGML_VULKAN_CHECK_RESULTS
            if (first_node_in_batch) {
                first_node_in_batch = false;
            }
#endif
        }

        if (submit && enqueued) {
            first_node_in_batch = true;
            submitted_nodes = 0;
            mul_mat_bytes = 0;
            if (submit_count < 3) {
                mul_mat_bytes_per_submit *= 2;
            }
            submit_count++;
        }
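
        // Skip the nodes that were fused into this dispatch, then reset the per-node
        // fusion state for the next iteration.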
        i += ctx->num_additional_fused_ops;
        ctx->num_additional_fused_ops = 0;
        ctx->fused_ops_write_mask = 0;
    }

    ctx->last_total_mul_mat_bytes = total_mul_mat_bytes;

    if (vk_perf_logger_enabled) {
        // End the command buffer and submit/wait
        GGML_ASSERT(!ctx->compute_ctx.expired());
        compute_ctx = ctx->compute_ctx.lock();
        ggml_vk_ctx_end(compute_ctx);

        ggml_vk_submit(compute_ctx, ctx->device->fence);
        VK_CHECK(ctx->device->device.waitForFences({ ctx->device->fence }, true, UINT64_MAX), "GGML_VULKAN_PERF waitForFences");
        ctx->device->device.resetFences({ ctx->device->fence });

        // Get the results and pass them to the logger
        std::vector<uint64_t> timestamps(cgraph->n_nodes + 1);
        VK_CHECK(ctx->device->device.getQueryPoolResults(ctx->device->query_pool, 0, cgraph->n_nodes + 1, (cgraph->n_nodes + 1)*sizeof(uint64_t), timestamps.data(), sizeof(uint64_t), vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait), "get timestamp results");

        for (int i = 0; i < cgraph->n_nodes; i++) {
            if (!ggml_vk_is_empty(cgraph->nodes[i])) {
                ctx->device->perf_logger->log_timing(cgraph->nodes[i], uint64_t((timestamps[i+1] - timestamps[i]) * ctx->device->properties.limits.timestampPeriod));
            }
        }

        ctx->device->perf_logger->print_timings();
    }

    if (!ctx->device->support_async) {
        ggml_vk_synchronize(ctx);
    }

    return GGML_STATUS_SUCCESS;

    UNUSED(backend);
}

// Sort the graph for improved parallelism.
static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * graph)
{
    VK_LOG_DEBUG("ggml_vk_graph_optimize(" << graph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    if (ctx->device->disable_graph_optimize) {
        return;
    }

    auto const &is_empty = [](ggml_tensor * node) -> bool {
        return node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
    };

    auto const &is_src_of = [](const ggml_tensor *dst, const ggml_tensor *src) -> bool {
        for (uint32_t s = 0; s < GGML_MAX_SRC; ++s) {
            if (dst->src[s] == src) {
                return true;
            }
        }
        // implicit dependency if they view the same tensor
        const ggml_tensor *dst2 = dst->view_src ? dst->view_src : dst;
        const ggml_tensor *src2 = src->view_src ? src->view_src : src;
        if (dst2 == src2) {
            return true;
        }
        return false;
    };

    std::vector<ggml_tensor *> new_order;
    std::vector<bool> used(graph->n_nodes, false);
    std::set<ggml_tensor *> used_node_set;
    int first_unused = 0;
    while (first_unused < graph->n_nodes) {
        std::vector<int> current_set;

        // Check for fusion patterns and avoid reordering them
        auto const &match_pattern = [&](const std::initializer_list<ggml_op> &pattern, int start) -> bool {
            if (start + (int)pattern.size() <= graph->n_nodes) {
                bool is_pattern = true;
                for (size_t j = 0; j < pattern.size(); ++j) {
                    if (graph->nodes[start + j]->op != pattern.begin()[j] || used[start + j]) {
                        is_pattern = false;
                    }
                }
                return is_pattern;
            }
            return false;
        };

        auto const &keep_pattern = [&](const std::initializer_list<ggml_op> &pattern) -> bool {
            if (match_pattern(pattern, first_unused)) {
                for (size_t j = 0; j < pattern.size(); ++j) {
                    new_order.push_back(graph->nodes[first_unused + j]);
                    used_node_set.insert(graph->nodes[first_unused + j]);
                    used[first_unused + j] = true;
                }
                while (first_unused < graph->n_nodes && used[first_unused]) {
                    first_unused++;
                }
                return true;
            }
            return false;
        };
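
        // Keep the topk_moe sequences contiguous: the fusion detection in
        // ggml_backend_vk_graph_compute matches them as exact runs of nodes, so
        // reordering across them would break those fusions.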
        if (keep_pattern(topk_moe_early_softmax_norm)) {
            continue;
        }
        if (keep_pattern(topk_moe_early_softmax)) {
            continue;
        }
        if (keep_pattern(topk_moe_late_softmax)) {
            continue;
        }

        // First, grab the next unused node.
        current_set.push_back(first_unused);

        // Loop through the next N nodes. Grab any that don't depend on other nodes that
        // haven't already been run. Nodes that have already been run have used[i] set
        // to true. Allow nodes that depend on the previous node if it's a fusion pattern
        // that we support (e.g. RMS_NORM + MUL).
        // This first pass only grabs "real" (non-view) nodes; the second pass grabs view nodes.
        // The goal is to not interleave real and view nodes in a way that breaks fusion.
        const int NUM_TO_CHECK = 20;
        for (int j = first_unused+1; j < std::min(first_unused + NUM_TO_CHECK, graph->n_nodes); ++j) {
            if (used[j]) {
                continue;
            }
            if (is_empty(graph->nodes[j])) {
                continue;
            }
            // Don't pull forward nodes from fusion patterns
            if (match_pattern(topk_moe_early_softmax_norm, j) ||
                match_pattern(topk_moe_early_softmax, j) ||
                match_pattern(topk_moe_late_softmax, j)) {
                continue;
            }
            bool ok = true;
            for (int c = first_unused; c < j; ++c) {
                if (!used[c] &&
                    is_src_of(graph->nodes[j], graph->nodes[c]) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_RMS_NORM && graph->nodes[j]->op == GGML_OP_MUL) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT && graph->nodes[j]->op == GGML_OP_ADD) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT_ID && graph->nodes[j]->op == GGML_OP_ADD_ID) &&
                    !(j == c+1 && c == current_set.back() && graph->nodes[c]->op == GGML_OP_MUL_MAT_ID && graph->nodes[j]->op == GGML_OP_MUL)) {
                    ok = false;
                    break;
                }
            }
            if (ok) {
                current_set.push_back(j);

                int rope_idx = j;
                // When we've found RMS_NORM + MUL, try to find a ROPE that uses it
                if (j > 0 &&
                    graph->nodes[j]->op == GGML_OP_MUL &&
                    graph->nodes[j-1]->op == GGML_OP_RMS_NORM) {
                    for (int k = j + 1; k < std::min(j + 15, graph->n_nodes); ++k) {
                        if (graph->nodes[k]->op == GGML_OP_ROPE &&
                            graph->nodes[k]->src[0] == graph->nodes[j] &&
                            // Check that other srcs are already valid
                            graph->nodes[k]->src[1]->op == GGML_OP_NONE &&
                            (graph->nodes[k]->src[2] == nullptr || graph->nodes[k]->src[2]->op == GGML_OP_NONE)) {
                            rope_idx = k;
                            current_set.push_back(rope_idx);
                            used[rope_idx] = true;
                            break;
                        }
                    }
                }
                // Look for ROPE + VIEW + SET_ROWS and make them consecutive
                if (graph->nodes[rope_idx]->op == GGML_OP_ROPE) {
                    int view_idx = -1;
                    int set_rows_idx = -1;
                    for (int k = rope_idx+1; k < std::min(rope_idx + 10, graph->n_nodes); ++k) {
                        if (view_idx == -1 &&
                            graph->nodes[k]->op == GGML_OP_VIEW &&
                            graph->nodes[k]->src[0] == graph->nodes[rope_idx]) {
                            view_idx = k;
                            continue;
                        }
                        if (view_idx != -1 &&
                            set_rows_idx == -1 &&
                            graph->nodes[k]->op == GGML_OP_SET_ROWS &&
                            graph->nodes[k]->src[0] == graph->nodes[view_idx]) {
                            set_rows_idx = k;
                            break;
                        }
                    }
                    if (set_rows_idx != -1) {
                        current_set.push_back(view_idx);
                        current_set.push_back(set_rows_idx);
                        used[view_idx] = true;
                        used[set_rows_idx] = true;
                    }
                }
                // Look for MUL_MAT_ID + ADD_ID + MUL
                if (j > 0 &&
                    graph->nodes[j]->op == GGML_OP_ADD_ID &&
                    graph->nodes[j-1]->op == GGML_OP_MUL_MAT_ID) {
                    for (int k = j + 1; k < std::min(j + 15, graph->n_nodes); ++k) {
                        if (graph->nodes[k]->op == GGML_OP_MUL &&
                            graph->nodes[k]->src[0] == graph->nodes[j] &&
                            // src1 must either be weights or already processed
                            (graph->nodes[k]->src[1]->op == GGML_OP_NONE || used_node_set.find(graph->nodes[k]->src[1]) != used_node_set.end())) {
                            current_set.push_back(k);
                            used[k] = true;
                            break;
                        }
                    }
                }
                // Look for MUL_MAT + ADD + ADD
                if (j > 0 &&
                    graph->nodes[j]->op == GGML_OP_ADD &&
                    graph->nodes[j-1]->op == GGML_OP_MUL_MAT) {
                    for (int k = j + 1; k < std::min(j + 15, graph->n_nodes); ++k) {
                        if (graph->nodes[k]->op == GGML_OP_ADD &&
                            graph->nodes[k]->src[0] == graph->nodes[j] &&
                            // src1 must either be weights or already processed
                            (graph->nodes[k]->src[1]->op == GGML_OP_NONE || used_node_set.find(graph->nodes[k]->src[1]) != used_node_set.end())) {
                            current_set.push_back(k);
                            used[k] = true;
                            break;
                        }
                    }
                }
            }
        }
        // Second pass grabs view nodes.
        // Skip this if it would break a fusion optimization (don't split up add->rms_norm or add->add).
        if (graph->nodes[current_set.back()]->op != GGML_OP_ADD) {
            for (int j = first_unused+1; j < std::min(first_unused + NUM_TO_CHECK, graph->n_nodes); ++j) {
                if (used[j]) {
                    continue;
                }
                if (!is_empty(graph->nodes[j])) {
                    continue;
                }
                bool ok = true;
                for (int c = first_unused; c < j; ++c) {
                    bool c_in_current_set = std::find(current_set.begin(), current_set.end(), c) != current_set.end();
                    // skip views whose srcs haven't been processed.
                    if (!used[c] &&
                        is_src_of(graph->nodes[j], graph->nodes[c]) &&
                        !c_in_current_set) {
                        ok = false;
                        break;
                    }
                }
                if (ok) {
                    current_set.push_back(j);
                }
            }
        }

        // Push the current set into new_order
        for (auto c : current_set) {
            new_order.push_back(graph->nodes[c]);
            used_node_set.insert(graph->nodes[c]);
            used[c] = true;
        }
        while (first_unused < graph->n_nodes && used[first_unused]) {
            first_unused++;
        }
    }

    // Replace the graph with the new order.
    for (int i = 0; i < graph->n_nodes; ++i) {
        graph->nodes[i] = new_order[i];
    }
}

// TODO: enable async and synchronize
static ggml_backend_i ggml_backend_vk_interface = {
    /* .get_name           = */ ggml_backend_vk_name,
    /* .free               = */ ggml_backend_vk_free,
    /* .set_tensor_async   = */ NULL,  // ggml_backend_vk_set_tensor_async,
    /* .get_tensor_async   = */ ggml_backend_vk_get_tensor_async,
    /* .cpy_tensor_async   = */ NULL,  // ggml_backend_vk_cpy_tensor_async,
    /* .synchronize        = */ ggml_backend_vk_synchronize,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_vk_graph_compute,
    /* .event_record       = */ NULL,
    /* .event_wait         = */ NULL,
    /* .graph_optimize     = */ ggml_vk_graph_optimize,
};

static ggml_guid_t ggml_backend_vk_guid() {
    static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
    return &guid;
}

ggml_backend_t ggml_backend_vk_init(size_t dev_num) {
    VK_LOG_DEBUG("ggml_backend_vk_init(" << dev_num << ")");

    ggml_backend_vk_context * ctx = new ggml_backend_vk_context;
    ggml_vk_init(ctx, dev_num);

    ggml_backend_t vk_backend = new ggml_backend {
        /* .guid    = */ ggml_backend_vk_guid(),
        /* .iface   = */ ggml_backend_vk_interface,
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), dev_num),
        /* .context = */ ctx,
    };

    if (!ctx->device->support_async) {
        vk_backend->iface.get_tensor_async = nullptr;
    }

    return vk_backend;
}
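
// Minimal usage sketch (illustrative, not part of this file): initialize the first
// Vulkan device, run a graph through the generic ggml backend API, then release it.
//
//     ggml_backend_t backend = ggml_backend_vk_init(0);
//     if (backend != nullptr) {
//         // ... build a ggml_cgraph and call ggml_backend_graph_compute(backend, graph) ...
//         ggml_backend_free(backend);
//     }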

bool ggml_backend_is_vk(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
}

int ggml_backend_vk_get_device_count() {
    return ggml_vk_get_device_count();
}

void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());
    int dev_idx = vk_instance.device_indices[device];
    ggml_vk_get_device_description(dev_idx, description, description_size);
}
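
// Totals are accumulated over device-local heaps only for discrete GPUs; integrated
// GPUs count every heap since they share system memory. With VK_EXT_memory_budget the
// free estimate is heapBudget - heapUsage, otherwise the whole heap size is
// (optimistically) reported as free.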
void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());
    GGML_ASSERT(device < (int) vk_instance.device_supports_membudget.size());

    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
    vk::PhysicalDeviceMemoryBudgetPropertiesEXT budgetprops;
    vk::PhysicalDeviceMemoryProperties2 memprops = {};

    const bool membudget_supported = vk_instance.device_supports_membudget[device];
    const bool is_integrated_gpu = vkdev.getProperties().deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

    if (membudget_supported) {
        memprops.pNext = &budgetprops;
    }
    vkdev.getMemoryProperties2(&memprops);

    *total = 0;
    *free = 0;

    for (uint32_t i = 0; i < memprops.memoryProperties.memoryHeapCount; ++i) {
        const vk::MemoryHeap & heap = memprops.memoryProperties.memoryHeaps[i];

        if (is_integrated_gpu || (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
            *total += heap.size;
            if (membudget_supported && i < budgetprops.heapUsage.size()) {
                *free += budgetprops.heapBudget[i] - budgetprops.heapUsage[i];
            } else {
                *free += heap.size;
            }
        }
    }
}

static vk::PhysicalDeviceType ggml_backend_vk_get_device_type(int device_idx) {
    GGML_ASSERT(device_idx >= 0 && device_idx < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice device = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device_idx]];

    vk::PhysicalDeviceProperties2 props = {};
    device.getProperties2(&props);

    return props.properties.deviceType;
}

static std::string ggml_backend_vk_get_device_pci_id(int device_idx) {
    GGML_ASSERT(device_idx >= 0 && device_idx < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice device = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device_idx]];

    const std::vector<vk::ExtensionProperties> ext_props = device.enumerateDeviceExtensionProperties();

    bool ext_support = false;
    for (const auto& properties : ext_props) {
        if (strcmp("VK_EXT_pci_bus_info", properties.extensionName) == 0) {
            ext_support = true;
            break;
        }
    }
    if (!ext_support) {
        return "";
    }

    vk::PhysicalDeviceProperties2 props = {};
    vk::PhysicalDevicePCIBusInfoPropertiesEXT pci_bus_info = {};
    props.pNext = &pci_bus_info;
    device.getProperties2(&props);

    const uint32_t pci_domain = pci_bus_info.pciDomain;
    const uint32_t pci_bus = pci_bus_info.pciBus;
    const uint32_t pci_device = pci_bus_info.pciDevice;
    const uint8_t pci_function = (uint8_t) pci_bus_info.pciFunction; // pci function is between 0 and 7, prevent printf overflow warning

    char pci_bus_id[16] = {};
    snprintf(pci_bus_id, sizeof(pci_bus_id), "%04x:%02x:%02x.%x", pci_domain, pci_bus, pci_device, pci_function);
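
    // Canonical "domain:bus:device.function" form, e.g. "0000:03:00.0".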
    return std::string(pci_bus_id);
}

//////////////////////////

struct ggml_backend_vk_device_context {
    size_t device;
    std::string name;
    std::string description;
    bool is_integrated_gpu;
    std::string pci_bus_id;
};

static const char * ggml_backend_vk_device_get_name(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->name.c_str();
}

static const char * ggml_backend_vk_device_get_description(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->description.c_str();
}

static void ggml_backend_vk_device_get_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)device->context;
    ggml_backend_vk_get_device_memory(ctx->device, free, total);
}

static ggml_backend_buffer_type_t ggml_backend_vk_device_get_buffer_type(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ggml_backend_vk_buffer_type(ctx->device);
}

static ggml_backend_buffer_type_t ggml_backend_vk_device_get_host_buffer_type(ggml_backend_dev_t dev) {
    UNUSED(dev);
    return ggml_backend_vk_host_buffer_type();
}

static enum ggml_backend_dev_type ggml_backend_vk_device_get_type(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->is_integrated_gpu ? GGML_BACKEND_DEVICE_TYPE_IGPU : GGML_BACKEND_DEVICE_TYPE_GPU;
}

static void ggml_backend_vk_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    props->name        = ggml_backend_vk_device_get_name(dev);
    props->description = ggml_backend_vk_device_get_description(dev);
    props->type        = ggml_backend_vk_device_get_type(dev);
    props->device_id   = ctx->pci_bus_id.empty() ? nullptr : ctx->pci_bus_id.c_str();
    ggml_backend_vk_device_get_memory(dev, &props->memory_free, &props->memory_total);
    props->caps = {
        /* .async                = */ false,
        /* .host_buffer          = */ true,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ false,
    };
}

static ggml_backend_t ggml_backend_vk_device_init(ggml_backend_dev_t dev, const char * params) {
    UNUSED(params);
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ggml_backend_vk_init(ctx->device);
}
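
// Queried by the ggml backend scheduler for every graph node; returning false makes
// the scheduler fall back to another backend (typically the CPU) for that op.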
static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_EXP:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_ERF:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_NEG:
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_SIGMOID:
                case GGML_UNARY_OP_HARDSIGMOID:
                case GGML_UNARY_OP_HARDSWISH:
                case GGML_UNARY_OP_ABS:
                case GGML_UNARY_OP_SOFTPLUS:
                case GGML_UNARY_OP_STEP:
                case GGML_UNARY_OP_ROUND:
                case GGML_UNARY_OP_CEIL:
                case GGML_UNARY_OP_FLOOR:
                case GGML_UNARY_OP_TRUNC:
                    return ggml_is_contiguous(op->src[0]) &&
                           (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                           (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
                           (op->src[0]->type == op->type);
                default:
                    return false;
            }
        case GGML_OP_GLU:
            switch (ggml_get_glu_op(op)) {
                case GGML_GLU_OP_GEGLU:
                case GGML_GLU_OP_REGLU:
                case GGML_GLU_OP_SWIGLU:
                case GGML_GLU_OP_SWIGLU_OAI:
                case GGML_GLU_OP_GEGLU_ERF:
                case GGML_GLU_OP_GEGLU_QUICK:
                    return ggml_is_contiguous(op->src[0]) &&
                           (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                           (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
                           (op->src[0]->type == op->type);
                default:
                    return false;
            }
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);
                if (op->op == GGML_OP_MUL_MAT_ID) {
                    if (!device->mul_mat_id_s[src0_type] && !device->mul_mat_id_m[src0_type] && !device->mul_mat_id_l[src0_type]) {
                        // If there's not enough shared memory for row_ids and the result tile, fallback to CPU
                        return false;
                    }
                }
                switch (src0_type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_Q2_K:
                    case GGML_TYPE_Q3_K:
                    case GGML_TYPE_Q4_K:
                    case GGML_TYPE_Q5_K:
                    case GGML_TYPE_Q6_K:
                    case GGML_TYPE_IQ1_S:
                    case GGML_TYPE_IQ1_M:
                    case GGML_TYPE_IQ2_XXS:
                    case GGML_TYPE_IQ2_XS:
                    case GGML_TYPE_IQ2_S:
                    case GGML_TYPE_IQ3_XXS:
                    case GGML_TYPE_IQ3_S:
                    case GGML_TYPE_IQ4_XS:
                    case GGML_TYPE_IQ4_NL:
                    case GGML_TYPE_MXFP4:
                        break;
                    default:
                        return false;
                }
                struct ggml_tensor * a;
                struct ggml_tensor * b;
                if (op->op == GGML_OP_MUL_MAT) {
                    a = op->src[0];
                    b = op->src[1];
                } else {
                    a = op->src[2];
                    b = op->src[1];
                }
                if (a->ne[3] != b->ne[3]) {
                    return false;
                }
                if (!(ggml_vk_dim01_contiguous(op->src[0]) || op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_BF16) ||
                    !(ggml_vk_dim01_contiguous(op->src[1]) || op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F16)) {
                    return false;
                }
                if (op->src[0]->type == GGML_TYPE_BF16 && op->src[1]->type == GGML_TYPE_F16) {
                    // We currently don't have a bf16 x f16 shader, or an fp16->bf16 copy shader.
                    // So don't support this combination for now.
                    return false;
                }
                return true;
            }
        case GGML_OP_FLASH_ATTN_EXT:
            {
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                auto device = ggml_vk_get_device(ctx->device);
                bool coopmat2 = device->coopmat2;
                uint32_t HSK = op->src[1]->ne[0];
                uint32_t HSV = op->src[2]->ne[0];
                if ((HSK % 8) != 0 || (HSV % 8) != 0) {
                    return false;
                }
                if (op->src[4] && op->src[4]->type != GGML_TYPE_F32) {
                    return false;
                }
                if (op->src[0]->type != GGML_TYPE_F32) {
                    return false;
                }
                if (op->type != GGML_TYPE_F32) {
                    return false;
                }
                if (op->src[3] && op->src[3]->type != GGML_TYPE_F16) {
                    return false;
                }
                // It's straightforward to support different K/V dequant, but would
                // significantly increase the number of pipelines
                if (op->src[1]->type != op->src[2]->type) {
                    return false;
                }
                switch (op->src[1]->type) {
                    case GGML_TYPE_F16:
                    case GGML_TYPE_F32:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q8_0:
                        // supported in scalar and coopmat2 paths
                        break;
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
                    //case GGML_TYPE_Q2_K:
                    //case GGML_TYPE_Q3_K:
                    //case GGML_TYPE_Q4_K:
                    //case GGML_TYPE_Q5_K:
                    //case GGML_TYPE_Q6_K:
                    //case GGML_TYPE_IQ1_S:
                    //case GGML_TYPE_IQ1_M:
                    //case GGML_TYPE_IQ2_XXS:
                    //case GGML_TYPE_IQ2_XS:
                    //case GGML_TYPE_IQ2_S:
                    //case GGML_TYPE_IQ3_XXS:
                    //case GGML_TYPE_IQ3_S:
                    //case GGML_TYPE_IQ4_XS:
                    case GGML_TYPE_IQ4_NL:
                        // currently supported only in coopmat2 path
                        if (!coopmat2) {
                            return false;
                        }
                        break;
                    default:
                        return false;
                }
                if (!coopmat2 && !(device->subgroup_shuffle && device->subgroup_vote)) {
                    // scalar/coopmat1 FA uses subgroupShuffle/subgroupAll
                    return false;
                }
                return true;
            }
        case GGML_OP_GET_ROWS:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_Q2_K:
                    case GGML_TYPE_Q3_K:
                    case GGML_TYPE_Q4_K:
                    case GGML_TYPE_Q5_K:
                    case GGML_TYPE_Q6_K:
                    case GGML_TYPE_IQ1_S:
                    case GGML_TYPE_IQ1_M:
                    case GGML_TYPE_IQ2_XXS:
                    case GGML_TYPE_IQ2_XS:
                    case GGML_TYPE_IQ2_S:
                    case GGML_TYPE_IQ3_XXS:
                    case GGML_TYPE_IQ3_S:
                    case GGML_TYPE_IQ4_XS:
                    case GGML_TYPE_IQ4_NL:
                    case GGML_TYPE_MXFP4:
                        return true;
                    default:
                        return false;
                }
            }
        case GGML_OP_SET_ROWS:
            {
                switch (op->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_IQ4_NL:
                        return true;
                    default:
                        return false;
                }
            }
        case GGML_OP_CONT:
        case GGML_OP_CPY:
        case GGML_OP_DUP:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
                if (src0_type == GGML_TYPE_F32) {
                    switch (src1_type) {
                        case GGML_TYPE_F32:
                        case GGML_TYPE_F16:
                        case GGML_TYPE_BF16:
                        case GGML_TYPE_Q4_0:
                        case GGML_TYPE_Q4_1:
                        case GGML_TYPE_Q5_0:
                        case GGML_TYPE_Q5_1:
                        case GGML_TYPE_Q8_0:
                        case GGML_TYPE_IQ4_NL:
                            return true;
                        default:
                            break;
                    }
                }
                if (src1_type == GGML_TYPE_F32) {
                    switch (src0_type) {
                        case GGML_TYPE_F16:
                        case GGML_TYPE_Q4_0:
                        case GGML_TYPE_Q4_1:
                        case GGML_TYPE_Q5_0:
                        case GGML_TYPE_Q5_1:
                        case GGML_TYPE_Q8_0:
                        case GGML_TYPE_IQ4_NL:
                            return true;
                        default:
                            break;
                    }
                }
                if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                if (
                    (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_I32) ||
                    (src0_type == GGML_TYPE_I32 && src1_type == GGML_TYPE_F32)
                ) {
                    return true;
                }
                // We can handle copying from a type to the same type if it's
                // either not quantized or is quantized and contiguous.
                // We use f16 or f32 shaders to do the copy,
                // so the type/block size must be a multiple of 2 bytes.
                if (src0_type == src1_type &&
                    (!ggml_is_quantized(src0_type) || (ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op))) &&
                    (ggml_type_size(src0_type) % 2) == 0) {
                    return true;
                }
                return false;
            }
        case GGML_OP_REPEAT:
            return ggml_type_size(op->type) == sizeof(float) && ggml_type_size(op->src[0]->type) == sizeof(float);
        case GGML_OP_REPEAT_BACK:
            return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_RMS_NORM:
            return true;
        case GGML_OP_NORM:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_L2_NORM:
            return ggml_is_contiguous(op->src[0]);
        case GGML_OP_ADD:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
            return (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                   (op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F16) &&
                   (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16);
        case GGML_OP_ADD_ID:
            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->src[2]->type == GGML_TYPE_I32 &&
                   op->type == GGML_TYPE_F32;
        case GGML_OP_SILU_BACK:
        case GGML_OP_RMS_NORM_BACK:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_SIN:
        case GGML_OP_COS:
        case GGML_OP_CLAMP:
            return op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_LEAKY_RELU:
        case GGML_OP_OPT_STEP_ADAMW:
        case GGML_OP_OPT_STEP_SGD:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_LOG:
        case GGML_OP_TRI:
            return (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                   op->type == op->src[0]->type;
        case GGML_OP_ARGSORT:
            {
                if (!ggml_is_contiguous(op) || !ggml_is_contiguous(op->src[0])) {
                    return false;
                }
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                auto device = ggml_vk_get_device(ctx->device);
                // pipeline_argsort_large_f32 requires vulkan memory model.
                if (device->vulkan_memory_model) {
                    return true;
                } else {
                    return op->ne[0] <= (1 << device->max_workgroup_size_log2);
                }
            }
        case GGML_OP_TOP_K:
            {
                if (!ggml_is_contiguous(op) || !ggml_is_contiguous(op->src[0])) {
                    return false;
                }
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                auto device = ggml_vk_get_device(ctx->device);
                // We could potentially support larger, using argsort to sort the
                // whole thing. Not clear if this is needed.
                uint32_t min_pipeline = (uint32_t)log2f(float(op->ne[0])) + 1;
                if (min_pipeline >= num_topk_pipelines ||
                    !device->pipeline_topk_f32[min_pipeline]) {
                    return false;
                }
            }
            return true;
        case GGML_OP_UPSCALE:
        case GGML_OP_ACC:
            return op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_CONCAT:
            return ggml_type_size(op->src[0]->type) == ggml_type_size(GGML_TYPE_F32);
        case GGML_OP_ADD1:
            return (op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32)
                || (op->src[0]->type == GGML_TYPE_F16 && op->src[1]->type == GGML_TYPE_F32)
                || (op->src[0]->type == GGML_TYPE_F16 && op->src[1]->type == GGML_TYPE_F16);
        case GGML_OP_ARANGE:
        case GGML_OP_FILL:
            return op->type == GGML_TYPE_F32;
        case GGML_OP_SCALE:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_PAD:
        case GGML_OP_ROLL:
            return op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_DIAG_MASK_INF:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_SOFT_MAX:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32
                && (!op->src[1] || (op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F16));
        case GGML_OP_SOFT_MAX_BACK:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32
                && ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_F32;
        case GGML_OP_SUM:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_MEAN:
            return op->src[0]->type == GGML_TYPE_F32 && ggml_is_contiguous_rows(op->src[0]);
        case GGML_OP_CUMSUM:
            {
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                auto device = ggml_vk_get_device(ctx->device);
                if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
                    return op->src[0]->type == GGML_TYPE_F32 && ggml_is_contiguous_rows(op->src[0]);
                }
                return false;
            }
        case GGML_OP_SOLVE_TRI:
            {
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);
                if (op->type != GGML_TYPE_F32 || op->src[0]->type != GGML_TYPE_F32) {
                    return false;
                }
                const uint32_t N = op->src[0]->ne[0];
                const uint32_t K = op->src[1]->ne[0];
                // K dimension limited to workgroup size
                if (K > 128) {
                    return false;
                }
                if (N * N * sizeof(float) + N * K * sizeof(float) > device->properties.limits.maxComputeSharedMemorySize) {
                    return false;
                }
                return true;
            }
        case GGML_OP_ARGMAX:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_COUNT_EQUAL:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_I32
                && ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_I32;
        case GGML_OP_IM2COL:
            return ggml_is_contiguous(op->src[1])
                && op->src[1]->type == GGML_TYPE_F32
                && (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16);
        case GGML_OP_IM2COL_3D:
            return op->src[1]->type == GGML_TYPE_F32
                && (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16);
        case GGML_OP_TIMESTEP_EMBEDDING:
            return op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_CONV_2D_DW:
            return (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16)
                && op->src[1]->type == GGML_TYPE_F32;
        case GGML_OP_POOL_2D:
            return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_RWKV_WKV6:
        case GGML_OP_RWKV_WKV7:
            return true; // all inputs are contiguous, see ggml.c
        case GGML_OP_SSM_SCAN:
            {
                for (int i = 0; i < 6; i++) {
                    if (op->src[i] && ggml_is_quantized(op->src[i]->type)) {
                        return false;
                    }
                }
                if (op->src[6] && op->src[6]->type != GGML_TYPE_I32) {
                    return false;
                }
                if (op->src[0]->type != GGML_TYPE_F32 || op->type != GGML_TYPE_F32) {
                    return false;
                }
                const uint32_t d_state = op->src[0]->ne[0];
                const uint32_t head_dim = op->src[0]->ne[1];
                bool is_mamba2 = (op->src[3] && op->src[3]->nb[1] == sizeof(float));
                if (!is_mamba2) {
                    return false;
                }
                if ((d_state != 128 && d_state != 256) || head_dim % 16 != 0) {
                    return false;
                }
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);
                const uint32_t SPLIT_H = 16;
                size_t stateC_size = SPLIT_H * d_state * sizeof(float);
                if (stateC_size > device->properties.limits.maxComputeSharedMemorySize) {
                    return false;
                }
                return true;
            }
        case GGML_OP_SSM_CONV:
            return op->src[0]->type == GGML_TYPE_F32;
        case GGML_OP_CONV_TRANSPOSE_1D:
            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
        case GGML_OP_CONV_2D:
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                // Op is disabled for Apple because it segfaults at pipeline create time on MoltenVK
                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
                const vk_device& device = ggml_vk_get_device(ctx->device);
                if (op->op == GGML_OP_CONV_TRANSPOSE_2D &&
                    device->properties.limits.maxPushConstantsSize < sizeof(vk_op_conv_transpose_2d_push_constants)) {
                    return false;
                }
                // Channel-contiguous format is not supported yet.
                return ((op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                        op->src[1]->type == GGML_TYPE_F32 &&
                        op->type == GGML_TYPE_F32 &&
                        ggml_is_contiguous(op->src[0]) &&
                        ggml_is_contiguous(op->src[1]) &&
                        ggml_is_contiguous(op));
            }
        default:
            return false;
    }

    UNUSED(dev);
}

static bool ggml_backend_vk_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    if (buft->iface.get_name != ggml_backend_vk_buffer_type_name) {
        return false;
    }

    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return buft_ctx->device->idx == ctx->device;
}
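
// Heuristic for offloading ops whose data lives in host memory: batches of at least
// 32 rows (or 32 experts for MUL_MAT_ID) are assumed to be worth the transfer cost.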
static bool ggml_backend_vk_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    const int min_batch_size = 32;

    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
           (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);

    UNUSED(dev);
}

static const struct ggml_backend_device_i ggml_backend_vk_device_i = {
    /* .get_name             = */ ggml_backend_vk_device_get_name,
    /* .get_description      = */ ggml_backend_vk_device_get_description,
    /* .get_memory           = */ ggml_backend_vk_device_get_memory,
    /* .get_type             = */ ggml_backend_vk_device_get_type,
    /* .get_props            = */ ggml_backend_vk_device_get_props,
    /* .init_backend         = */ ggml_backend_vk_device_init,
    /* .get_buffer_type      = */ ggml_backend_vk_device_get_buffer_type,
    /* .get_host_buffer_type = */ ggml_backend_vk_device_get_host_buffer_type,
    /* .buffer_from_host_ptr = */ NULL,
    /* .supports_op          = */ ggml_backend_vk_device_supports_op,
    /* .supports_buft        = */ ggml_backend_vk_device_supports_buft,
    /* .offload_op           = */ ggml_backend_vk_device_offload_op,
    /* .event_new            = */ NULL,
    /* .event_free           = */ NULL,
    /* .event_synchronize    = */ NULL,
};

static const char * ggml_backend_vk_reg_get_name(ggml_backend_reg_t reg) {
    UNUSED(reg);
    return GGML_VK_NAME;
}

static size_t ggml_backend_vk_reg_get_device_count(ggml_backend_reg_t reg) {
    UNUSED(reg);
    return ggml_backend_vk_get_device_count();
}

static ggml_backend_dev_t ggml_backend_vk_reg_get_device(ggml_backend_reg_t reg, size_t device) {
    static std::vector<ggml_backend_dev_t> devices;

    static bool initialized = false;

    {
        static std::mutex mutex;
        std::lock_guard<std::mutex> lock(mutex);
        if (!initialized) {
            for (int i = 0; i < ggml_backend_vk_get_device_count(); i++) {
                ggml_backend_vk_device_context * ctx = new ggml_backend_vk_device_context;
                char desc[256];
                ggml_backend_vk_get_device_description(i, desc, sizeof(desc));
                ctx->device = i;
                ctx->name = GGML_VK_NAME + std::to_string(i);
                ctx->description = desc;
                ctx->is_integrated_gpu = ggml_backend_vk_get_device_type(i) == vk::PhysicalDeviceType::eIntegratedGpu;
                ctx->pci_bus_id = ggml_backend_vk_get_device_pci_id(i);
                devices.push_back(new ggml_backend_device {
                    /* .iface   = */ ggml_backend_vk_device_i,
                    /* .reg     = */ reg,
                    /* .context = */ ctx,
                });
            }
            initialized = true;
        }
    }

    GGML_ASSERT(device < devices.size());
    return devices[device];
}

static const struct ggml_backend_reg_i ggml_backend_vk_reg_i = {
    /* .get_name         = */ ggml_backend_vk_reg_get_name,
    /* .get_device_count = */ ggml_backend_vk_reg_get_device_count,
    /* .get_device       = */ ggml_backend_vk_reg_get_device,
    /* .get_proc_address = */ NULL,
};

ggml_backend_reg_t ggml_backend_vk_reg() {
    static ggml_backend_reg reg = {
        /* .api_version = */ GGML_BACKEND_API_VERSION,
        /* .iface       = */ ggml_backend_vk_reg_i,
        /* .context     = */ nullptr,
    };
    try {
        ggml_vk_instance_init();
        return &reg;
    } catch (const vk::SystemError& e) {
        VK_LOG_DEBUG("ggml_backend_vk_reg() -> Error: System error: " << e.what());
        return nullptr;
    } catch (const std::exception &e) {
        VK_LOG_DEBUG("ggml_backend_vk_reg() -> Error: " << e.what());
        return nullptr;
    } catch (...) {
        VK_LOG_DEBUG("ggml_backend_vk_reg() -> Error: unknown exception during Vulkan init");
        return nullptr;
    }
}

// Extension availability
static bool ggml_vk_instance_validation_ext_available() {
#ifdef GGML_VULKAN_VALIDATE
    // Check if validation layer provides the extension
    const std::string layer_name = "VK_LAYER_KHRONOS_validation";
    for (const auto& layer : vk::enumerateInstanceLayerProperties()) {
        if (layer_name == layer.layerName.data()) {
            for (const auto& ext : vk::enumerateInstanceExtensionProperties(layer_name)) {
                if (strcmp("VK_EXT_validation_features", ext.extensionName.data()) == 0) {
                    return true;
                }
            }
        }
    }

    std::cerr << "ggml_vulkan: WARNING: Validation layer or layer extension VK_EXT_validation_features not found." << std::endl;
#endif
    return false;
}

static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef __APPLE__
    // Check for portability enumeration extension for MoltenVK support
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
            return true;
        }
    }

    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

static bool ggml_vk_instance_debug_utils_ext_available(
    const std::vector<vk::ExtensionProperties> & instance_extensions) {
    // Check for the VK_EXT_debug_utils extension
    for (const auto & properties : instance_extensions) {
        if (strcmp("VK_EXT_debug_utils", properties.extensionName) == 0) {
            return true;
        }
    }

    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_EXT_debug_utils not found." << std::endl;
    return false;

    UNUSED(instance_extensions);
}
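
// Minimum hardware requirement for this backend: 16-bit storage buffer access, which
// the shaders rely on to read f16 data.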
static bool ggml_vk_device_is_supported(const vk::PhysicalDevice & vkdev) {
    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    vkGetPhysicalDeviceFeatures2(vkdev, &device_features2);

    return vk11_features.storageBuffer16BitAccess;
}

static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch) {
    switch (props.vendorID) {
        case VK_VENDOR_ID_INTEL:
            // Only allowing Xe2 GPUs at the moment, since Xe2 GPUs gain a significant performance boost
            // while some older hardware (e.g. Arc A770) has performance regressions
            return arch == vk_device_architecture::INTEL_XE2;
        case VK_VENDOR_ID_AMD:
            if (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource) {
                // Workaround for AMD proprietary driver reporting support on all GPUs
                return arch == vk_device_architecture::AMD_RDNA3;
            }
            return true;
        default:
            return true;
    }
}

// checks

#ifdef GGML_VULKAN_CHECK_RESULTS
static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
    if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
        return;
    }
    for (int j = 0; j < level; j++) {
        std::cerr << " ";
    }
    std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << std::endl;

    done.push_back(tensor);

    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (tensor->src[i] != nullptr) {
            ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
        }
    }
}

static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16 && tensor->type != GGML_TYPE_I32) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else if (tensor->type == GGML_TYPE_I32) {
                    val = *(const int32_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}

static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name) {
    void * tensor_data = tensor->data;

    const bool is_gpu = tensor->buffer != nullptr && ggml_backend_buffer_is_vk(tensor->buffer);
    if (is_gpu) {
        const size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

        vk_buffer buffer_gpu = buf_ctx->dev_buffer;
        ggml_vk_buffer_read(buffer_gpu, vk_tensor_offset(tensor) + tensor->view_offs, tensor_data, tensor_size);
    }

    std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
    std::cerr << "tensor=" << tensor << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
    if (tensor->src[0] != nullptr) {
        std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
    }
    if (tensor->src[1] != nullptr) {
        std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
    }
    std::cerr << std::endl << "Result:" << std::endl;
    ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
    std::cerr << std::endl;
    std::vector<const ggml_tensor *> done;
    ggml_vk_print_graph_origin(tensor, done);

    if (is_gpu) {
        free(tensor_data);
    }
}

void * comp_result;
size_t comp_size;
size_t comp_nb[GGML_MAX_DIMS];
size_t check_counter = 0;
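
// Note: these globals hold the CPU reference result for the node under test; it is
// compared against the GPU output after the graph runs (in the companion
// ggml_vk_check_results_1 elsewhere in this file).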
static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx) {
    ggml_tensor * tensor = cgraph->nodes[tensor_idx + ctx->num_additional_fused_ops];
    if (tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_SET_ROWS) {
        return;
    }

    check_counter++;
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_0(" << tensor->name << ")");

    struct ggml_init_params iparams = {
        /*.mem_size   =*/ 2ul*1024ul*1024ul*1024ul,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };

    struct ggml_context * ggml_ctx = ggml_init(iparams);

    std::array<struct ggml_tensor *, GGML_MAX_SRC> src_clone = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
    const char * srci_name[GGML_MAX_SRC] = {"src0", "src1", "src2", "src3", "src4", "src5", "src6", "src7", "src8", "src9"};

    std::map<ggml_tensor *, ggml_tensor *> cloned_tensors;
    std::vector<void *> cloned_mallocs;

    struct ggml_tensor * tensor_clone = nullptr;

    for (int f = 0; f < ctx->num_additional_fused_ops + 1; ++f) {
        tensor = cgraph->nodes[tensor_idx + f];
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            ggml_tensor * srci = tensor->src[i];
            if (srci == nullptr) {
                continue;
            }
            // If a src tensor has been cloned, use that one
            auto it = cloned_tensors.find(srci);
            if (it != cloned_tensors.end()) {
                src_clone[i] = it->second;
                continue;
            }
            ggml_tensor * srci_clone = ggml_dup_tensor(ggml_ctx, srci);
            size_t srci_size = ggml_nbytes(srci);

            src_clone[i] = srci_clone;
            void *src_buffer = malloc(srci_size);
            cloned_mallocs.push_back(src_buffer);

            srci_clone->data = src_buffer;
            if (ggml_backend_buffer_is_host(srci->buffer)) {
                memcpy(srci_clone->data, srci->data, srci_size);
                memcpy(srci_clone->nb, srci->nb, sizeof(size_t) * GGML_MAX_DIMS);
            } else if (ggml_backend_buffer_is_vk(srci->buffer)) {
                ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)srci->buffer->context;
                vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
                uint64_t offset = vk_tensor_offset(srci) + srci->view_offs;
                if (!ggml_is_contiguous(srci) && ggml_vk_dim01_contiguous(srci)) {
                    for (int i3 = 0; i3 < srci->ne[3]; i3++) {
                        for (int i2 = 0; i2 < srci->ne[2]; i2++) {
                            const int idx = i3*srci->ne[2] + i2;
                            ggml_vk_buffer_read(buffer_gpu, offset + idx * srci->nb[2], ((char *)srci_clone->data + idx * srci_clone->nb[2]), srci->ne[1] * srci->nb[1]);
                        }
                    }

                    srci_clone->nb[0] = srci->nb[0];
                    srci_clone->nb[1] = srci->nb[1];
                    for (int i = 2; i < GGML_MAX_DIMS; i++) {
                        srci_clone->nb[i] = srci_clone->nb[i - 1]*srci_clone->ne[i - 1];
                    }
                } else {
                    if (offset + srci_size >= buffer_gpu->size) {
                        srci_size = buffer_gpu->size - offset;
                    }
                    ggml_vk_buffer_read(buffer_gpu, offset, srci_clone->data, srci_size);
                    memcpy(srci_clone->nb, srci->nb, sizeof(size_t) * GGML_MAX_DIMS);
                }
            } else {
                GGML_ABORT("fatal error");
            }

            if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
                ggml_vk_print_tensor(srci, srci_name[i]);
            }
        }

        if (tensor->op == GGML_OP_FLASH_ATTN_EXT) {
            const float * params = (const float *)tensor->op_params;
            tensor_clone = ggml_flash_attn_ext(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3], params[0], params[1], params[2]);
            if (src_clone[4]) {
                ggml_flash_attn_ext_add_sinks(tensor_clone, src_clone[4]);
            }
        } else if (tensor->op == GGML_OP_MUL_MAT) {
            tensor_clone = ggml_mul_mat(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
            tensor_clone = ggml_mul_mat_id(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]);
        } else if (tensor->op == GGML_OP_SUB) {
            tensor_clone = ggml_sub(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_MUL) {
            tensor_clone = ggml_mul(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_DIV) {
            tensor_clone = ggml_div(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_CONCAT) {
            tensor_clone = ggml_concat(ggml_ctx, src_clone[0], src_clone[1], *(int *)tensor->op_params);
        } else if (tensor->op == GGML_OP_UPSCALE) {
            tensor_clone = ggml_interpolate(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], (ggml_scale_mode) tensor->op_params[0]);
        } else if (tensor->op == GGML_OP_SCALE) {
            const float * params = (const float *)tensor->op_params;
            tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
        } else if (tensor->op == GGML_OP_ADD1) {
            tensor_clone = ggml_add1(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_ARANGE) {
            const float start = ggml_get_op_params_f32(tensor, 0);
            const float stop  = ggml_get_op_params_f32(tensor, 1);
            const float step  = ggml_get_op_params_f32(tensor, 2);
            tensor_clone = ggml_arange(ggml_ctx, start, stop, step);
        } else if (tensor->op == GGML_OP_FILL) {
            const float value = ggml_get_op_params_f32(tensor, 0);
            tensor_clone = ggml_fill(ggml_ctx, src_clone[0], value);
        } else if (tensor->op == GGML_OP_SQR) {
            tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_SQRT) {
            tensor_clone = ggml_sqrt(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_SIN) {
            tensor_clone = ggml_sin(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_COS) {
            tensor_clone = ggml_cos(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_LOG) {
            tensor_clone = ggml_log(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_TRI) {
            tensor_clone = ggml_tri(ggml_ctx, src_clone[0], ggml_get_op_params_i32(tensor, 0));
        } else if (tensor->op == GGML_OP_CLAMP) {
            const float * params = (const float *)tensor->op_params;
            tensor_clone = ggml_clamp(ggml_ctx, src_clone[0], params[0], params[1]);
        } else if (tensor->op == GGML_OP_PAD) {
            tensor_clone = ggml_pad_ext(ggml_ctx, src_clone[0], tensor->op_params[0], tensor->op_params[1], tensor->op_params[2], tensor->op_params[3],
                                        tensor->op_params[4], tensor->op_params[5], tensor->op_params[6], tensor->op_params[7]);
        } else if (tensor->op == GGML_OP_REPEAT) {
            tensor_clone = ggml_repeat(ggml_ctx, src_clone[0], tensor);
        } else if (tensor->op == GGML_OP_REPEAT_BACK) {
            tensor_clone = ggml_repeat_back(ggml_ctx, src_clone[0], tensor);
        } else if (tensor->op == GGML_OP_ADD) {
            tensor_clone = ggml_add(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_ACC) {
            tensor_clone = ggml_acc(ggml_ctx, src_clone[0], src_clone[1], tensor->op_params[0], tensor->op_params[1], tensor->op_params[2], tensor->op_params[3]);
        } else if (tensor->op == GGML_OP_NORM) {
            tensor_clone = ggml_norm(ggml_ctx, src_clone[0], *(float *)tensor->op_params);
        } else if (tensor->op == GGML_OP_GROUP_NORM) {
            const float * float_params = (const float *)tensor->op_params;
            tensor_clone = ggml_group_norm(ggml_ctx, src_clone[0], tensor->op_params[0], float_params[1]);
        } else if (tensor->op == GGML_OP_RMS_NORM) {
            tensor_clone = ggml_rms_norm(ggml_ctx, src_clone[0], *(float *)tensor->op_params);
        } else if (tensor->op == GGML_OP_RMS_NORM_BACK) {
            const float eps = ((float *) tensor->op_params)[0];
            tensor_clone = ggml_rms_norm_back(ggml_ctx, src_clone[0], src_clone[1], eps);
        } else if (tensor->op == GGML_OP_SILU_BACK) {
            tensor_clone = ggml_silu_back(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_L2_NORM) {
            const float eps = ((float *) tensor->op_params)[0];
            tensor_clone = ggml_l2_norm(ggml_ctx, src_clone[0], eps);
        } else if (tensor->op == GGML_OP_SOFT_MAX) {
            if (tensor->src[1] != nullptr) {
                const float * params = (const float *)tensor->op_params;
                tensor_clone = ggml_soft_max_ext(ggml_ctx, src_clone[0], src_clone[1], params[0], params[1]);
            } else {
                tensor_clone = ggml_soft_max(ggml_ctx, src_clone[0]);
            }
        } else if (tensor->op == GGML_OP_SOFT_MAX_BACK) {
            tensor_clone = ggml_soft_max_ext_back(ggml_ctx, src_clone[0], src_clone[1], ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
        } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
            tensor_clone = ggml_diag_mask_inf(ggml_ctx, src_clone[0], tensor->op_params[0]);
        } else if (tensor->op == GGML_OP_ROPE || tensor->op == GGML_OP_ROPE_BACK) {
            const int n_dims = ((int32_t *) tensor->op_params)[1];
            const int mode = ((int32_t *) tensor->op_params)[2];
            //const int n_ctx_ggml = ((int32_t *) tensor->op_params)[3];
            const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
            const float freq_base = ((float *) tensor->op_params)[5];
            const float freq_scale = ((float *) tensor->op_params)[6];
            const float ext_factor = ((float *) tensor->op_params)[7];
            const float attn_factor = ((float *) tensor->op_params)[8];
            const float beta_fast = ((float *) tensor->op_params)[9];
            const float beta_slow = ((float *) tensor->op_params)[10];
            if (mode & GGML_ROPE_TYPE_MROPE) {
                int32_t * sections = ((int32_t *) tensor->op_params) + 11;
                if (tensor->op == GGML_OP_ROPE) {
                    tensor_clone = ggml_rope_multi(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, sections, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
                } else {
                    tensor_clone = ggml_rope_multi_back(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, sections, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
                }
            } else {
                if (tensor->op == GGML_OP_ROPE) {
                    tensor_clone = ggml_rope_ext(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
                } else {
                    tensor_clone = ggml_rope_ext_back(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
                }
            }
        } else if (tensor->op == GGML_OP_UNARY) {
            switch (ggml_get_unary_op(tensor)) {
                case GGML_UNARY_OP_EXP:
                    tensor_clone = ggml_exp(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_SILU:
                    tensor_clone = ggml_silu(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_GELU:
                    tensor_clone = ggml_gelu(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_GELU_ERF:
                    tensor_clone = ggml_gelu_erf(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_GELU_QUICK:
                    tensor_clone = ggml_gelu_quick(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_RELU:
                    tensor_clone = ggml_relu(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_NEG:
                    tensor_clone = ggml_neg(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_TANH:
                    tensor_clone = ggml_tanh(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_SIGMOID:
                    tensor_clone = ggml_sigmoid(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_HARDSIGMOID:
                    tensor_clone = ggml_hardsigmoid(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_HARDSWISH:
                    tensor_clone = ggml_hardswish(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_ABS:
                    tensor_clone = ggml_abs(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_SOFTPLUS:
                    tensor_clone = ggml_softplus(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_STEP:
                    tensor_clone = ggml_step(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_ROUND:
                    tensor_clone = ggml_round(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_CEIL:
                    tensor_clone = ggml_ceil(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_FLOOR:
                    tensor_clone = ggml_floor(ggml_ctx, src_clone[0]);
                    break;
                case GGML_UNARY_OP_TRUNC:
                    tensor_clone = ggml_trunc(ggml_ctx, src_clone[0]);
                    break;
                default:
                    std::cerr << "Missing vk_check_results unary OP: " << ggml_unary_op_name(ggml_get_unary_op(tensor)) << std::endl;
                    GGML_ABORT("fatal error");
            }
        } else if (tensor->op == GGML_OP_GLU) {
            if (src_clone[1] == nullptr) {
                tensor_clone = ggml_glu(ggml_ctx, src_clone[0], (ggml_glu_op) tensor->op_params[0], tensor->op_params[1]);
            } else {
                tensor_clone = ggml_glu_split(ggml_ctx, src_clone[0], src_clone[1], (ggml_glu_op) tensor->op_params[0]);
            }
            ggml_set_op_params_i32(tensor_clone, 2, ggml_get_op_params_i32(tensor, 2));
            ggml_set_op_params_i32(tensor_clone, 3, ggml_get_op_params_i32(tensor, 3));
        } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
            if (tensor->src[1] == nullptr) {
                tensor_clone = ggml_dup(ggml_ctx, src_clone[0]);
                tensor_clone->type = tensor->type;
            } else {
                tensor_clone = ggml_cpy(ggml_ctx, src_clone[0], src_clone[1]);
            }
        } else if (tensor->op == GGML_OP_CONT) {
            tensor_clone = ggml_cont_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
        } else if (tensor->op == GGML_OP_RESHAPE) {
            tensor_clone = ggml_reshape_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
        } else if (tensor->op == GGML_OP_VIEW) {
            tensor_clone = ggml_view_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
        } else if (tensor->op == GGML_OP_PERMUTE) {
            int32_t * params = (int32_t *)tensor->op_params;
            tensor_clone = ggml_permute(ggml_ctx, src_clone[0], params[0], params[1], params[2], params[3]);
        } else if (tensor->op == GGML_OP_TRANSPOSE) {
            tensor_clone = ggml_transpose(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_GET_ROWS) {
            tensor_clone = ggml_get_rows(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_ARGSORT) {
            tensor_clone = ggml_argsort(ggml_ctx, src_clone[0], (ggml_sort_order) *(int *)tensor->op_params);
        } else if (tensor->op == GGML_OP_TOP_K) {
            tensor_clone = ggml_top_k(ggml_ctx, src_clone[0], tensor->ne[0]);
        } else if (tensor->op == GGML_OP_SUM) {
            tensor_clone = ggml_sum(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_SUM_ROWS) {
            tensor_clone = ggml_sum_rows(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_CUMSUM) {
            tensor_clone = ggml_cumsum(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_MEAN) {
            tensor_clone = ggml_mean(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_ARGMAX) {
            tensor_clone = ggml_argmax(ggml_ctx, src_clone[0]);
        } else if (tensor->op == GGML_OP_COUNT_EQUAL) {
            tensor_clone = ggml_count_equal(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_SOLVE_TRI) {
            tensor_clone = ggml_solve_tri(ggml_ctx, src_clone[0], src_clone[1], true, true, false);
        } else if (tensor->op == GGML_OP_IM2COL) {
            const int32_t s0 = tensor->op_params[0];
            const int32_t s1 = tensor->op_params[1];
            const int32_t p0 = tensor->op_params[2];
            const int32_t p1 = tensor->op_params[3];
            const int32_t d0 = tensor->op_params[4];
            const int32_t d1 = tensor->op_params[5];
            const bool is_2D = tensor->op_params[6] == 1;
            tensor_clone = ggml_im2col(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1, is_2D, tensor->type);
        } else if (tensor->op == GGML_OP_IM2COL_3D) {
            const int32_t s0 = tensor->op_params[0];
            const int32_t s1 = tensor->op_params[1];
            const int32_t s2 = tensor->op_params[2];
            const int32_t p0 = tensor->op_params[3];
            const int32_t p1 = tensor->op_params[4];
            const int32_t p2 = tensor->op_params[5];
            const int32_t d0 = tensor->op_params[6];
            const int32_t d1 = tensor->op_params[7];
            const int32_t d2 = tensor->op_params[8];
            const int32_t IC = tensor->op_params[9];
            tensor_clone = ggml_im2col_3d(ggml_ctx, src_clone[0], src_clone[1], IC, s0, s1, s2, p0, p1, p2, d0, d1, d2, tensor->type);
        } else if (tensor->op == GGML_OP_TIMESTEP_EMBEDDING) {
            const int32_t dim = tensor->op_params[0];
            const int32_t max_period = tensor->op_params[1];
            tensor_clone = ggml_timestep_embedding(ggml_ctx, src_clone[0], dim, max_period);
        } else if (tensor->op == GGML_OP_CONV_TRANSPOSE_1D) {
            const int32_t s0 = tensor->op_params[0];
            const int32_t p0 = tensor->op_params[1];
            const int32_t d0 = tensor->op_params[2];
            tensor_clone = ggml_conv_transpose_1d(ggml_ctx, src_clone[0], src_clone[1], s0, p0, d0);
        } else if (tensor->op == GGML_OP_POOL_2D) {
            enum ggml_op_pool op = static_cast<ggml_op_pool>(tensor->op_params[0]);
            const int32_t k0 = tensor->op_params[1];
            const int32_t k1 = tensor->op_params[2];
            const int32_t s0 = tensor->op_params[3];
            const int32_t s1 = tensor->op_params[4];
            const int32_t p0 = tensor->op_params[5];
            const int32_t p1 = tensor->op_params[6];
            tensor_clone = ggml_pool_2d(ggml_ctx, src_clone[0], op, k0, k1, s0, s1, p0, p1);
        } else if (tensor->op == GGML_OP_CONV_2D) {
            const int32_t s0 = tensor->op_params[0];
            const int32_t s1 = tensor->op_params[1];
            const int32_t p0 = tensor->op_params[2];
            const int32_t p1 = tensor->op_params[3];
            const int32_t d0 = tensor->op_params[4];
            const int32_t d1 = tensor->op_params[5];
            tensor_clone = ggml_conv_2d(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1);
        } else if (tensor->op == GGML_OP_CONV_2D_DW) {
            const int32_t s0 = tensor->op_params[0];
            const int32_t s1 = tensor->op_params[1];
            const int32_t p0 = tensor->op_params[2];
            const int32_t p1 = tensor->op_params[3];
            const int32_t d0 = tensor->op_params[4];
            const int32_t d1 = tensor->op_params[5];
            tensor_clone = ggml_conv_2d_dw_direct(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1);
        } else if (tensor->op == GGML_OP_CONV_TRANSPOSE_2D) {
            const int32_t s = tensor->op_params[0];
            tensor_clone = ggml_conv_transpose_2d_p0(ggml_ctx, src_clone[0], src_clone[1], s);
        } else if (tensor->op == GGML_OP_LEAKY_RELU) {
            const float * op_params = (const float *)tensor->op_params;
            tensor_clone = ggml_leaky_relu(ggml_ctx, src_clone[0], op_params[0], false);
        } else if (tensor->op == GGML_OP_RWKV_WKV6) {
            tensor_clone = ggml_rwkv_wkv6(ggml_ctx, src_clone[0], src_clone[1],
                                          src_clone[2], src_clone[3], src_clone[4], src_clone[5]);
        } else if (tensor->op == GGML_OP_RWKV_WKV7) {
            tensor_clone = ggml_rwkv_wkv7(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3],
                                          src_clone[4], src_clone[5], src_clone[6]);
        } else if (tensor->op == GGML_OP_OPT_STEP_ADAMW) {
            src_clone[0]->flags = tensor->src[0]->flags;
            tensor_clone = ggml_opt_step_adamw(ggml_ctx, src_clone[0], src_clone[1],
                                               src_clone[2], src_clone[3], src_clone[4]);
        } else if (tensor->op == GGML_OP_OPT_STEP_SGD) {
            src_clone[0]->flags = tensor->src[0]->flags;
            tensor_clone = ggml_opt_step_sgd(ggml_ctx, src_clone[0], src_clone[1],
                                             src_clone[2]);
        } else if (tensor->op == GGML_OP_ADD_ID) {
            tensor_clone = ggml_add_id(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]);
        } else if (tensor->op == GGML_OP_SSM_SCAN) {
            tensor_clone = ggml_ssm_scan(ggml_ctx, src_clone[0], src_clone[1], src_clone[2],
                                         src_clone[3], src_clone[4], src_clone[5], src_clone[6]);
        } else if (tensor->op == GGML_OP_SSM_CONV) {
            tensor_clone = ggml_ssm_conv(ggml_ctx, src_clone[0], src_clone[1]);
        } else if (tensor->op == GGML_OP_ROLL) {
            const int32_t s0 = tensor->op_params[0];
            const int32_t s1 = tensor->op_params[1];
            const int32_t s2 = tensor->op_params[2];
            const int32_t s3 = tensor->op_params[3];
            tensor_clone = ggml_roll(ggml_ctx, src_clone[0], s0, s1, s2, s3);
        } else {
            std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
            GGML_ABORT("fatal error");
        }

        cloned_tensors[tensor] = tensor_clone;
    }
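
    // Run the cloned graph on the CPU and stash a copy of the reference result for
    // ggml_vk_check_results_1 to compare against.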
    ggml_cgraph * cgraph_cpu = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph_cpu, tensor_clone);
    ggml_graph_compute_with_ctx(ggml_ctx, cgraph_cpu, 8);

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        ggml_vk_print_tensor(tensor_clone, "tensor_clone");
    }

    comp_size = ggml_nbytes(tensor_clone);

    comp_result = malloc(comp_size);
    memcpy(comp_result, tensor_clone->data, comp_size);
    memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);

    for (auto m : cloned_mallocs) {
        free(m);
    }

    ggml_free(ggml_ctx);

    VK_LOG_DEBUG("END ggml_vk_check_results_0(" << tensor->name << ")");
}
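
// Compare the Vulkan backend's output for the node at tensor_idx against the CPU
// reference result captured by ggml_vk_check_results_0.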
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx) {
    ggml_tensor * tensor = cgraph->nodes[tensor_idx + ctx->num_additional_fused_ops];
    if (tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_SET_ROWS) {
        return;
    }
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_1(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];
    ggml_tensor * src3 = tensor->src[3];

    void * tensor_data = tensor->data;
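
    // If the output lives in a Vulkan buffer, read it back into host memory (clamped
    // to the end of the buffer) before comparing.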
    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

        vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
        uint64_t offset = vk_tensor_offset(tensor) + tensor->view_offs;
        if (offset + tensor_size >= buffer_gpu->size) {
            tensor_size = buffer_gpu->size - offset;
        }

        ggml_vk_buffer_read(buffer_gpu, offset, tensor_data, tensor_size);
    }
    float first_error_result = -1.0f;
    float first_error_correct = -1.0f;
    std::array<int, 4> first_error = { -1, -1, -1, -1 };
    double avg_err = 0.0;
    size_t counter = 0;
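
    // Element-wise comparison against the CPU reference: accumulate the relative
    // error and remember the first element that deviates badly.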
    for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
        for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
            for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
                for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
                    const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
                    float correct = 0.0f;
                    float result = 0.0f;

                    if (buffer_size_fit) {
                        if (tensor->type == GGML_TYPE_F32) {
                            correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else if (tensor->type == GGML_TYPE_F16) {
                            correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                            result = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                        } else if (tensor->type == GGML_TYPE_BF16) {
                            correct = ggml_bf16_to_fp32(*(ggml_bf16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                            result = ggml_bf16_to_fp32(*(ggml_bf16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                        } else if (tensor->type == GGML_TYPE_I32) {
                            correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else if (tensor->type == GGML_TYPE_I64) {
                            correct = *(int64_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result = *(int64_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else {
                            std::cerr << "Results check not implemented for type " << ggml_type_name(tensor->type) << std::endl;
                        }
                    } else {
                        std::cerr << "Missing debug code for type " << ggml_type_name(tensor->type) << std::endl;
                        GGML_ABORT("fatal error");
                    }

                    if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
                        std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
                        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
                        if (src0 != nullptr) {
                            std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
                        }
                        if (src1 != nullptr) {
                            std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
                        }
                        if (src2 != nullptr) {
                            std::cerr << "src2=" << src2 << " src2->name=" << src2->name << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
                        }
                        if (src3 != nullptr) {
                            std::cerr << "src3=" << src3 << " src3->name=" << src3->name << " op=" << ggml_op_name(src3->op) << " type=" << ggml_type_name(src3->type) << " ne0=" << src3->ne[0] << " nb0=" << src3->nb[0] << " ne1=" << src3->ne[1] << " nb1=" << src3->nb[1] << " ne2=" << src3->ne[2] << " nb2=" << src3->nb[2] << " ne3=" << src3->ne[3] << " nb3=" << src3->nb[3] << " offset=" << src3->view_offs << std::endl;
                        }
                        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
                        std::cerr << std::endl << "Result:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
                        std::cerr << std::endl << "Correct:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
                        std::cerr << std::endl;
                        std::vector<const ggml_tensor *> done;
                        ggml_vk_print_graph_origin(tensor, done);
                        GGML_ABORT("fatal error");
                    }
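
                    // Use the relative error for |correct| > 1 and the absolute error otherwise.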
                    const double denom = std::fabs(correct) > 1.0f ? (std::fabs(correct) > 1e-8 ? std::fabs(correct) : 1e-8) : 1.0f;
                    if (first_error[0] == -1 && std::fabs(correct - result) / denom > 0.5) {
                        first_error[0] = i0;
                        first_error[1] = i1;
                        first_error[2] = i2;
                        first_error[3] = i3;
                        first_error_result = result;
                        first_error_correct = correct;
                    }
                    // Skip infinite values to avoid a NaN in avg_err. NaN can also legitimately
                    // appear in the results; if both sides are NaN the error counts as 0
                    // (a NaN/Inf mismatch already aborted above).
                    if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
                        avg_err += std::fabs(correct - result) / denom;
                    }
                    counter++;
                }
            }
        }
    }
    avg_err /= counter;

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        if (src3 != nullptr) {
            std::cerr << "src3=" << src3 << " op=" << ggml_op_name(src3->op) << " type=" << ggml_type_name(src3->type) << " ne0=" << src3->ne[0] << " nb0=" << src3->nb[0] << " ne1=" << src3->ne[1] << " nb1=" << src3->nb[1] << " ne2=" << src3->ne[2] << " nb2=" << src3->nb[2] << " ne3=" << src3->ne[3] << " nb3=" << src3->nb[3] << " offset=" << src3->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
    }
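
    // A mean relative error above 0.5, or a NaN average, is treated as a hard failure.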
    if (avg_err > 0.5 || std::isnan(avg_err)) {
        std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        if (src3 != nullptr) {
            std::cerr << "src3=" << src3 << " op=" << ggml_op_name(src3->op) << " type=" << ggml_type_name(src3->type) << " ne0=" << src3->ne[0] << " nb0=" << src3->nb[0] << " ne1=" << src3->ne[1] << " nb1=" << src3->nb[1] << " ne2=" << src3->ne[2] << " nb2=" << src3->nb[2] << " ne3=" << src3->ne[3] << " nb3=" << src3->nb[3] << " offset=" << src3->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
        GGML_ABORT("fatal error");
    } else {
        std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " avg_err=" << avg_err << std::endl;
    }

    free(comp_result);
    comp_result = nullptr;
    comp_size = 0;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        free(tensor_data);
    }

    VK_LOG_DEBUG("END ggml_vk_check_results_1(" << tensor->name << ")");
}
#endif

GGML_BACKEND_DL_IMPL(ggml_backend_vk_reg)