// ggml-vulkan.cpp
#include "ggml-vulkan.h"

#include <vulkan/vulkan_core.h>
#ifdef GGML_VULKAN_RUN_TESTS
#include <chrono>
#endif

#include <vulkan/vulkan.hpp>

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <tuple>
#include <vector>
#include <sstream>
#include <utility>
#include <memory>
#include <limits>
#include <map>

#include "ggml.h"
#include "ggml-backend-impl.h"

#include "ggml-vulkan-shaders.hpp"

#define VK_API_VERSION VK_API_VERSION_1_2

#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))

#define VK_VENDOR_ID_AMD 0x1002
#define VK_VENDOR_ID_APPLE 0x106b
#define VK_VENDOR_ID_INTEL 0x8086
#define VK_VENDOR_ID_NVIDIA 0x10de

#define VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN 0
#define VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI 1
#define VK_DEVICE_DESCRIPTOR_POOL_MODE_SINGLE 2

#define VK_NUM_TYPES 16

#define GGML_VK_MAX_NODES 8192

#define MAX_VK_BUFFERS 256

#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 1
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif

#define VK_CHECK(err, msg)                                          \
    do {                                                            \
        vk::Result err_ = (err);                                    \
        if (err_ != vk::Result::eSuccess) {                         \
            fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n",  \
                #err, to_string(err_).c_str(), __FILE__, __LINE__); \
            exit(1);                                                \
        }                                                           \
    } while (0)
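
// Usage sketch (call site is illustrative, not from this section): wrap any call
// that returns a vk::Result, e.g.
//     VK_CHECK(device.waitForFences(1, &fence, true, UINT64_MAX), "wait for fence");
// On failure this prints the failing expression, the stringified result, and the
// file/line, then exits. Note that the msg argument is currently unused by the macro.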

#ifdef GGML_VULKAN_DEBUG
#define VK_LOG_DEBUG(msg) std::cerr << msg << std::endl
#else
#define VK_LOG_DEBUG(msg) ((void) 0)
#endif // GGML_VULKAN_DEBUG

struct ggml_backend_vk_context;

struct vk_queue {
    uint32_t queue_family_index;
    vk::Queue queue;
    vk::CommandPool pool;
    uint32_t cmd_buffer_idx;
    std::vector<vk::CommandBuffer> cmd_buffers;

    vk::PipelineStageFlags stage_flags;
};

struct vk_pipeline_struct {
    std::string name;
    vk::ShaderModule shader_module;
    vk::DescriptorSetLayout dsl;
    std::vector<vk::DescriptorPool> descriptor_pools;
    std::vector<vk::DescriptorSet> descriptor_sets;
    uint32_t descriptor_set_idx;
    vk::PipelineLayout layout;
    vk::Pipeline pipeline;
    uint32_t push_constant_size;
    uint32_t parameter_count;
    std::array<uint32_t, 3> wg_denoms;
    uint32_t align;
};

typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;
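
// Ownership model: owners hold a vk_pipeline (shared_ptr); the device keeps only
// vk_pipeline_ref (weak_ptr) entries in vk_device::pipelines, so tracking a
// pipeline for cleanup does not extend its lifetime.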

static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);

struct vk_matmul_pipeline_struct {
    vk_pipeline l, m, s;
    vk_pipeline a_l, a_m, a_s;
};

typedef std::shared_ptr<vk_matmul_pipeline_struct> vk_matmul_pipeline;
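
// l/m/s hold the large/medium/small workgroup-tile variants of a matmul shader;
// the a_* members are their aligned counterparts, used when the matrix
// dimensions are a multiple of the tile size so the shader can skip edge handling.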

struct vk_device {
    vk::PhysicalDevice physical_device;
    vk::PhysicalDeviceProperties properties;
    std::string name;
    uint64_t max_memory_allocation_size;
    bool fp16;
    vk::Device device;
    uint32_t vendor_id;
    vk_queue compute_queue;
    vk_queue transfer_queue;
    bool single_queue;
    uint32_t descriptor_set_mode;
    uint32_t subgroup_size;
    bool uma;
    bool initialized;
    size_t idx;

    vk_matmul_pipeline pipeline_matmul_f32;
    vk_matmul_pipeline pipeline_matmul_f32_f16;
    vk_matmul_pipeline pipeline_matmul_f16;
    vk_matmul_pipeline pipeline_matmul_f16_f32;
    vk_pipeline pipeline_matmul_split_k_reduce;

    vk_matmul_pipeline pipeline_dequant_mul_mat_mat[VK_NUM_TYPES];

    vk_matmul_pipeline pipeline_matmul_id_f32;
    vk_matmul_pipeline pipeline_matmul_id_f16;
    vk_matmul_pipeline pipeline_matmul_id_f16_f32;

    vk_matmul_pipeline pipeline_dequant_mul_mat_mat_id[VK_NUM_TYPES];

    vk_pipeline pipeline_dequant[VK_NUM_TYPES];
    vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[VK_NUM_TYPES];
    vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[VK_NUM_TYPES];
    vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[VK_NUM_TYPES];

    vk_pipeline pipeline_mul_mat_vec_p021_f16_f32;
    vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
    vk_pipeline pipeline_get_rows[VK_NUM_TYPES];
    vk_pipeline pipeline_get_rows_f32[VK_NUM_TYPES];
    vk_pipeline pipeline_mul_f32;
    vk_pipeline pipeline_div_f32;
    vk_pipeline pipeline_add_f32;
    vk_pipeline pipeline_scale_f32;
    vk_pipeline pipeline_sqr_f32;
    vk_pipeline pipeline_clamp_f32;
    vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
    vk_pipeline pipeline_norm_f32;
    vk_pipeline pipeline_rms_norm_f32;
    vk_pipeline pipeline_gelu_f32;
    vk_pipeline pipeline_silu_f32;
    vk_pipeline pipeline_relu_f32;
    vk_pipeline pipeline_diag_mask_inf_f32;
    vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
    vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16;
    vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
    vk_pipeline pipeline_argsort_f32;
    vk_pipeline pipeline_sum_rows_f32;

    std::vector<vk_pipeline_ref> pipelines;

    ~vk_device() {
        VK_LOG_DEBUG("destroy device " << name);
        device.destroyCommandPool(compute_queue.pool);
        if (!single_queue) {
            device.destroyCommandPool(transfer_queue.pool);
        }

        for (auto& pipeline : pipelines) {
            if (pipeline.expired()) {
                continue;
            }

            vk_pipeline pl = pipeline.lock();
            ggml_vk_destroy_pipeline(device, pl);
        }
        pipelines.clear();

        device.destroy();
    }
};

struct vk_buffer_struct {
    vk::Buffer buffer;
    vk::DeviceMemory device_memory;
    vk::MemoryPropertyFlags memory_property_flags;
    void * ptr;
    size_t size = 0;
    ggml_backend_vk_context * ctx;

    std::shared_ptr<vk_device> device;

    ~vk_buffer_struct() {
        if (size == 0) {
            return;
        }
        VK_LOG_DEBUG("~vk_buffer_struct(" << buffer << ", " << size << ")");

        device->device.freeMemory(device_memory);
        device->device.destroyBuffer(buffer);
    }
};

typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;
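
// As with pipelines, buffers are reference counted: holding a vk_buffer keeps the
// allocation alive, while a vk_buffer_ref observes it without owning it. The
// destructor above frees the Vulkan memory when the last vk_buffer is dropped.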

struct vk_subbuffer {
    vk_buffer buffer;
    uint64_t offset;
    uint64_t size;
};

struct vk_semaphore {
    vk::Semaphore s;
    uint64_t value;
};

struct vk_submission {
    vk::CommandBuffer buffer;
    std::vector<vk_semaphore> wait_semaphores;
    std::vector<vk_semaphore> signal_semaphores;
};

typedef std::vector<vk_submission> vk_sequence;
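
// A vk_submission is one command buffer plus the semaphore values it waits on
// and signals; a vk_sequence is an ordered run of such submissions.
// ggml_vk_submit() below flattens a context's sequences into a single queue submit.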

struct vk_mat_mat_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t k_split;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
};

struct vk_mat_vec_push_constants {
    uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
};

struct vk_mat_mat_id_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11;
};

struct vk_mat_vec_id_push_constants {
    uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t ne11;
};

struct vk_op_push_constants {
    uint32_t KX;
    uint32_t KY;
    float param1;
    float param2;
};

struct vk_op_unary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t d_offset;
    float param1; float param2;
};

struct vk_op_binary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23;
    uint32_t d_offset;
    float param1; float param2;
};

struct vk_op_diag_mask_push_constants {
    uint32_t ncols;
    uint32_t rows_per_channel;
    int32_t n_past;
};

struct vk_op_rope_push_constants {
    uint32_t ncols;
    uint32_t n_dims;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[2];
    float theta_scale;
    uint32_t has_ff;
};

struct vk_op_soft_max_push_constants {
    uint32_t KX;
    uint32_t KY;
    float scale;
    float max_bias;
    float m0;
    float m1;
    uint32_t n_head_log2;
};

struct vk_op_argsort_push_constants {
    uint32_t ncols;
    uint32_t ncols_pad;
    int32_t order;
};
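
// These structs are uploaded as Vulkan push constants, so each must match the
// layout(push_constant) block of its compute shader field for field;
// sizeof(...) is what gets passed as push_constant_size to
// ggml_vk_create_pipeline() below.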

// Allow pre-recording command buffers
struct vk_staging_memcpy {
    vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}

    void * dst;
    const void * src;
    size_t n;
};

struct vk_context {
    size_t idx;

    vk_submission * s;
    std::vector<vk_sequence> seqs;

    ggml_tensor * exit_tensor;

    std::vector<vk_staging_memcpy> in_memcpys;
    std::vector<vk_staging_memcpy> out_memcpys;

    vk_queue * q;
};
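
// Because command buffers are recorded ahead of execution, host-side staging
// copies cannot run inline; they are queued as vk_staging_memcpy entries
// (in_memcpys before the submit, out_memcpys after the GPU work completes) and
// replayed at the appropriate point.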

struct ggml_tensor_extra_gpu {
    size_t ctx_idx;

    vk_buffer_ref buffer_gpu;
    uint64_t offset;

    void reset() {
        ctx_idx = 0;
        buffer_gpu.reset();
        offset = 0;
    }
};

struct ggml_vk_garbage_collector {
    std::vector<vk_semaphore> tl_semaphores;
    std::vector<vk_semaphore> semaphores;
    std::vector<vk::Event> events;
    std::vector<vk_buffer> temp_buffers;
    std::vector<vk_context> contexts;
};

#if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
#include <mutex>

#define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl

static std::string format_size(size_t size) {
    const size_t kib = 1024;
    const size_t mib = kib * 1024;
    const size_t gib = mib * 1024;

    std::ostringstream oss;
    oss << std::fixed << std::setprecision(2);

    if (size >= gib) {
        oss << static_cast<double>(size) / gib << " GiB";
    } else if (size >= mib) {
        oss << static_cast<double>(size) / mib << " MiB";
    } else if (size >= kib) {
        oss << static_cast<double>(size) / kib << " KiB";
    } else {
        oss << size << " B";
    }

    return oss.str();
}
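
// Example: format_size(3 * 1024 * 1024) yields "3.00 MiB" and format_size(512)
// yields "512 B"; values are formatted to two decimal places by the
// std::setprecision(2) above.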

static std::mutex log_mutex;

class vk_memory_logger {
public:
    vk_memory_logger(): total_device(0), total_host(0) {}
    void log_allocation(vk_buffer_ref buf_ref, size_t size);
    void log_deallocation(vk_buffer_ref buf_ref);

private:
    std::map<vk::Buffer, size_t> allocations; // Track allocations
    size_t total_device;
    size_t total_host;
};
#else
#define VK_LOG_MEMORY(msg) ((void) 0)
#endif // GGML_VULKAN_MEMORY_DEBUG || GGML_VULKAN_DEBUG

struct ggml_backend_vk_context {
    std::string name;

    std::shared_ptr<vk_device> device;

    size_t semaphore_idx, event_idx;
    ggml_vk_garbage_collector gc;
    std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;
    size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k;
    vk_buffer prealloc_x, prealloc_y, prealloc_split_k;
    vk::Fence fence;
    vk_buffer staging;
    size_t staging_size;
    size_t staging_offset;
    vk_buffer sync_staging;

    vk_buffer buffer_pool[MAX_VK_BUFFERS];

    vk_context * compute_ctx;
    vk_context * transfer_ctx;

    bool initialized;

    size_t idx;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    vk_memory_logger memory_logger;
#endif
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    const std::string type = device ? "device" : "host";
    allocations[buf->buffer] = size;
    total_device += device ? size : 0;
    total_host += device ? 0 : size;
    VK_LOG_MEMORY("VULKAN" << buf->ctx->idx << ": +" << format_size(size) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
}

void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
    if (buf_ref.expired() || buf_ref.lock()->size == 0) {
        return;
    }

    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    std::string type = device ? "device" : "host";
    auto it = allocations.find(buf->buffer);
    if (it != allocations.end()) {
        // Only adjust the totals once the entry is known to exist; dereferencing
        // an end() iterator would be undefined behavior.
        total_device -= device ? it->second : 0;
        total_host -= device ? 0 : it->second;
        VK_LOG_MEMORY("VULKAN" << buf->ctx->idx << ": -" << format_size(it->second) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
        allocations.erase(it);
    } else {
        VK_LOG_MEMORY("ERROR VULKAN" << buf->ctx->idx << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
    }
}
#endif // GGML_VULKAN_MEMORY_DEBUG

struct vk_instance_t {
    vk::Instance instance;

    std::vector<size_t> device_indices;

    ggml_backend_t backends[GGML_VK_MAX_DEVICES];
    ggml_backend_vk_context contexts[GGML_VK_MAX_DEVICES];
    ggml_backend_buffer_type buffer_types[GGML_VK_MAX_DEVICES];
    bool initialized[GGML_VK_MAX_DEVICES];
};

static std::shared_ptr<vk_device> ggml_vk_get_device(size_t idx) {
    VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");
    static std::weak_ptr<vk_device> devices[GGML_VK_MAX_DEVICES];

    if (devices[idx].expired()) {
        VK_LOG_DEBUG("Initializing new vk_device");
        std::shared_ptr<vk_device> device = std::make_shared<vk_device>();
        device->initialized = false;
        devices[idx] = device;
        return device;
    }

    return devices[idx].lock();
}
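
// The static weak_ptr array acts as a per-index device cache: every backend that
// asks for the same device index shares one vk_device while any owner holds a
// shared_ptr, and the device is recreated on demand after the last owner drops it.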

#ifdef GGML_VULKAN_CHECK_RESULTS
static size_t vk_skip_checks;
static size_t vk_output_tensor;

static void ggml_vk_print_tensor(ggml_backend * ctx, const ggml_tensor * tensor, const char * name);
static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor);
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor);
#endif

typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

static bool vk_instance_initialized = false;
static vk_instance_t vk_instance;

GGML_CALL static void ggml_backend_vk_free(ggml_backend_t backend);

static void ggml_vk_create_pipeline(ggml_backend_vk_context * ctx, vk_pipeline& pipeline, const std::string& name, size_t spv_size, const void* spv_data, const std::string& entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t>&& specialization_constants, uint32_t align) {
    VK_LOG_DEBUG("ggml_vk_create_pipeline(" << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << ")");
    GGML_ASSERT(parameter_count > 0);
    GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT

    pipeline = std::make_shared<vk_pipeline_struct>();
    pipeline->name = name;
    pipeline->parameter_count = parameter_count;
    pipeline->push_constant_size = push_constant_size;
    pipeline->wg_denoms = wg_denoms;
    pipeline->align = align;

    vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
    pipeline->shader_module = ctx->device->device.createShaderModule(shader_module_create_info);

    std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
    std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
    for (uint32_t i = 0; i < parameter_count; i++) {
        dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
        dsl_binding_flags.push_back({});
    }

    vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };

    vk::PushConstantRange pcr(
        vk::ShaderStageFlagBits::eCompute,
        0,
        pipeline->push_constant_size
    );

    vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
        {},
        dsl_binding);
    descriptor_set_layout_create_info.setPNext(&dslbfci);
    pipeline->dsl = ctx->device->device.createDescriptorSetLayout(descriptor_set_layout_create_info);

    // Check if device supports multiple descriptors per pool
    if (ctx->device->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN) {
        const uint32_t alloc_count = 2;

        // Try allocating multiple sets from one pool
        // This fails on AMD for some reason, so add a fall back to allocating one pool per set
        vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, alloc_count, descriptor_pool_size);
        vk::DescriptorPool pool = ctx->device->device.createDescriptorPool(descriptor_pool_create_info);

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = pipeline->dsl;
        }
        try {
            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pool, alloc_count, layouts.data());
            std::vector<vk::DescriptorSet> sets = ctx->device->device.allocateDescriptorSets(descriptor_set_alloc_info);
            // Multi-set allocation succeeded: record it, otherwise the mode would
            // stay UNKNOWN and the MULTI branch below would never be taken.
            ctx->device->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI;
        } catch(vk::OutOfPoolMemoryError const&) {
            ctx->device->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_SINGLE;
        }

        ctx->device->device.destroyDescriptorPool(pool);
    }

    if (ctx->device->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI) {
        vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, 128, descriptor_pool_size);
        pipeline->descriptor_pools.push_back(ctx->device->device.createDescriptorPool(descriptor_pool_create_info));
    }

    pipeline->descriptor_set_idx = 0;

    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), pipeline->dsl, pcr);
    pipeline->layout = ctx->device->device.createPipelineLayout(pipeline_layout_create_info);

    std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());

    for (size_t i = 0; i < specialization_constants.size(); i++) {
        specialization_entries[i].constantID = i;
        specialization_entries[i].offset = i * sizeof(uint32_t);
        specialization_entries[i].size = sizeof(uint32_t);
    }

    vk::SpecializationInfo specialization_info(
        specialization_entries.size(),
        specialization_entries.data(),
        specialization_constants.size() * sizeof(uint32_t),
        specialization_constants.data()
    );

    vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
        vk::PipelineShaderStageCreateFlags(),
        vk::ShaderStageFlagBits::eCompute,
        pipeline->shader_module,
        entrypoint.c_str(),
        &specialization_info);

    vk::ComputePipelineCreateInfo compute_pipeline_create_info(
        vk::PipelineCreateFlags(),
        pipeline_shader_create_info,
        pipeline->layout);
    pipeline->pipeline = ctx->device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;

    ctx->device->pipelines.push_back(pipeline);
}
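
// Call-site sketch (argument values are illustrative, not taken from this section):
// pipelines are built from the SPIR-V blobs embedded via ggml-vulkan-shaders.hpp,
// roughly like
//     ggml_vk_create_pipeline(ctx, ctx->device->pipeline_add_f32, "add_f32",
//         add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants),
//         {512, 1, 1}, {}, 1);
// i.e. three storage-buffer bindings (src0, src1, dst), the matching push-constant
// struct, and workgroup denominators used to derive the dispatch size.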

static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_vk_destroy_pipeline(" << pipeline->name << ")");
    for (auto& pool : pipeline->descriptor_pools) {
        device.destroyDescriptorPool(pool);
    }
    pipeline->descriptor_pools.clear();
    pipeline->descriptor_sets.clear();
    pipeline->descriptor_set_idx = 0;

    device.destroyDescriptorSetLayout(pipeline->dsl);

    device.destroyPipelineLayout(pipeline->layout);

    device.destroyShaderModule(pipeline->shader_module);

    device.destroyPipeline(pipeline->pipeline);
}

static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx, vk_pipeline& pipeline, uint32_t n) {
    VK_LOG_DEBUG("ggml_pipeline_allocate_descriptor_sets(" << pipeline->name << ", " << n << ")");
    if (pipeline->descriptor_sets.size() >= pipeline->descriptor_set_idx + n) {
        // Enough descriptors are available
        return;
    }

    if (ctx->device->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI) {
        const uint32_t alloc_count = pipeline->descriptor_set_idx + n - pipeline->descriptor_sets.size();
        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = pipeline->dsl;
        }
        vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[0], alloc_count, layouts.data());
        std::vector<vk::DescriptorSet> sets = ctx->device->device.allocateDescriptorSets(descriptor_set_alloc_info);
        pipeline->descriptor_sets.insert(pipeline->descriptor_sets.end(), sets.begin(), sets.end());
    } else {
        for (uint32_t i = pipeline->descriptor_sets.size(); i < pipeline->descriptor_set_idx + n; i++) {
            vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count);
            vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, 1, descriptor_pool_size);
            pipeline->descriptor_pools.push_back(ctx->device->device.createDescriptorPool(descriptor_pool_create_info));

            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[i], 1, &pipeline->dsl);
            std::vector<vk::DescriptorSet> sets = ctx->device->device.allocateDescriptorSets(descriptor_set_alloc_info);
            pipeline->descriptor_sets.push_back(sets[0]);
        }
    }
}
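
// In MULTI mode all sets come from the single pool created in
// ggml_vk_create_pipeline() (sized for 128 sets); in SINGLE mode each descriptor
// set gets its own dedicated pool, so descriptor_pools[i] pairs with
// descriptor_sets[i]. Sets are recycled by resetting descriptor_set_idx (see
// ggml_pipeline_cleanup below), never freed individually.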

static void ggml_pipeline_cleanup(vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_pipeline_cleanup(" << pipeline->name << ")");
    pipeline->descriptor_set_idx = 0;
}

static vk::CommandBuffer ggml_vk_create_cmd_buffer(ggml_backend_vk_context * ctx, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");
    if (q.cmd_buffers.size() > q.cmd_buffer_idx) {
        // Reuse command buffer
        return q.cmd_buffers[q.cmd_buffer_idx++];
    }

    vk::CommandBufferAllocateInfo command_buffer_alloc_info(
        q.pool,
        vk::CommandBufferLevel::ePrimary,
        1);
    const std::vector<vk::CommandBuffer> cmd_buffers = ctx->device->device.allocateCommandBuffers(command_buffer_alloc_info);
    auto buf = cmd_buffers.front();

    q.cmd_buffers.push_back(buf);
    q.cmd_buffer_idx++;

    return buf;
}

static vk_submission ggml_vk_create_submission(ggml_backend_vk_context * ctx, vk_queue& q, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    VK_LOG_DEBUG("ggml_vk_create_submission()");
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(ctx, q);
    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
    return s;
}
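
// ggml_vk_submit() walks every sequence recorded in the context, builds one
// vk::SubmitInfo per submission (with a chained vk::TimelineSemaphoreSubmitInfo
// carrying the wait/signal values), and hands the whole batch to the queue in a
// single submit call. The per-submission vectors are pre-reserved because the
// SubmitInfo structs keep raw pointers into them.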

static void ggml_vk_submit(vk_context * ctx, vk::Fence fence) {
    VK_LOG_DEBUG("ggml_vk_submit(" << ctx->seqs.size() << ", " << fence << ")");
    if (ctx->seqs.empty()) {
        return;
    }

    std::vector<std::vector<uint64_t>> tl_wait_vals;
    std::vector<std::vector<uint64_t>> tl_signal_vals;
    std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
    std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
    std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
    std::vector<vk::SubmitInfo> submit_infos;
    int idx = -1;
    std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;

    size_t reserve = 0;

    for (const auto& sequence : ctx->seqs) {
        reserve += sequence.size();
    }

    // Pre-reserve vectors to prevent reallocation, which invalidates pointers
    tl_wait_semaphores.reserve(reserve);
    tl_wait_vals.reserve(reserve);
    tl_signal_semaphores.reserve(reserve);
    tl_signal_vals.reserve(reserve);
    tl_submit_infos.reserve(reserve);
    submit_infos.reserve(reserve);
    stage_flags.reserve(reserve);

    for (const auto& sequence : ctx->seqs) {
        for (const auto& submission : sequence) {
            stage_flags.push_back({});
            idx++;
            tl_wait_vals.push_back({});
            tl_wait_semaphores.push_back({});
            tl_signal_vals.push_back({});
            tl_signal_semaphores.push_back({});
            for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
                stage_flags[idx].push_back(ctx->q->stage_flags);
                tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
                tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
            }
            for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
                tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
                tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
            }
            tl_submit_infos.push_back({
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_vals[idx].data(),
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_vals[idx].data(),
            });
            tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
            tl_submit_infos[idx].pNext = nullptr;
            vk::SubmitInfo si{
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_semaphores[idx].data(),
                stage_flags[idx].data(),
                1,
                &submission.buffer,
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_semaphores[idx].data(),
            };
            si.setPNext(&tl_submit_infos[idx]);
            submit_infos.push_back(si);
        }
    }

    ctx->q->queue.submit(submit_infos, fence);

    ctx->seqs.clear();
}
static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
    VK_LOG_DEBUG("ggml_vk_find_queue_family_index()");

    const uint32_t qfsize = queue_family_props.size();

    // Try with avoid preferences first
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
            return i;
        }
    }

    // Fall back to only required
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to reusing the compute queue family
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to ignoring min_num_queues
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations.
    // Thus, if the capabilities of a queue family include VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT, then reporting the VK_QUEUE_TRANSFER_BIT capability separately for that queue family is optional.
    if (compute_index >= 0) {
        return compute_index;
    }

    std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;

    for (auto &q_family : queue_family_props) {
        std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
    }

    abort();
}
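
// Set up a vk_queue: create a transient command pool on the chosen queue
// family, fetch the queue handle, and store the pipeline stage flags used
// for barriers and semaphore waits on this queue.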
static void ggml_vk_create_queue(ggml_backend_vk_context * ctx, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags) {
    VK_LOG_DEBUG("ggml_vk_create_queue()");
    q.queue_family_index = queue_family_index;

    vk::CommandPoolCreateInfo command_pool_create_info_compute(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), queue_family_index);
    q.pool = ctx->device->device.createCommandPool(command_pool_create_info_compute);

    q.cmd_buffer_idx = 0;

    q.queue = ctx->device->device.getQueue(queue_family_index, queue_index);

    q.stage_flags = stage_flags;
}
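
// Allocate a new zero-initialized vk_context in the garbage-collector list.
// The returned pointer aliases storage owned by ctx->gc.contexts, so it stays
// valid only as long as that container's storage does.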
static vk_context * ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_create_context()");
    ctx->gc.contexts.emplace_back();
    vk_context * result = &ctx->gc.contexts[ctx->gc.contexts.size() - 1];
    memset((void *) result, 0, sizeof(vk_context));
    result->idx = ctx->gc.contexts.size() - 1;
    result->q = &q;
    return result;
}
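
// Create a binary semaphore and register it with the garbage collector for
// later cleanup.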
static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_binary_semaphore()");
    vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
    vk::SemaphoreCreateInfo ci{};
    ci.setPNext(&tci);
    vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
    ctx->gc.semaphores.push_back({ semaphore, 0 });
    return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
}
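
// Hand out the next timeline semaphore from the reuse pool, creating a new
// one only when ctx->semaphore_idx runs past the end of the pool.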
static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
    if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
        vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
        vk::SemaphoreCreateInfo ci{};
        ci.setPNext(&tci);
        vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
        ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
    }
    return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
}
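
// Events are pooled and reused in the same way as the timeline semaphores above.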
static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
    if (ctx->event_idx >= ctx->gc.events.size()) {
        ctx->gc.events.push_back(ctx->device->device.createEvent({}));
    }
    return ctx->gc.events[ctx->event_idx++];
}
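
// Recycle all command buffers of a queue by resetting its command pool.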
static void ggml_vk_queue_cleanup(ggml_backend_vk_context * ctx, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_queue_cleanup()");
    // Requires command buffers to be done
    ctx->device->device.resetCommandPool(q.pool);
    q.cmd_buffer_idx = 0;
}
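
// Find a memory type index that is permitted by the buffer's memory
// requirements, carries all requested property flags, and lives on a heap
// large enough for the allocation. Returns UINT32_MAX if none qualifies.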
static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            return static_cast<uint32_t>(i);
        }
    }
    return UINT32_MAX;
}
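
// Create a buffer usable as a storage buffer and as a transfer source and
// destination. Memory is allocated from a type matching req_flags, falling
// back to fallback_flags if necessary; host-visible memory is persistently
// mapped into buf->ptr.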
static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    VK_LOG_DEBUG("ggml_vk_create_buffer(device " << ctx->idx << ", " << size << ", " << to_string(req_flags) << ", " << to_string(fallback_flags) << ")");
    vk_buffer buf = std::make_shared<vk_buffer_struct>();

    if (size == 0) {
        buf->size = 0;
        return buf;
    }

    buf->size = size;
    vk::BufferCreateInfo buffer_create_info{
        vk::BufferCreateFlags(),
        size,
        vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
    };

    buf->buffer = ctx->device->device.createBuffer(buffer_create_info);

    vk::MemoryRequirements mem_req = ctx->device->device.getBufferMemoryRequirements(buf->buffer);

    vk::PhysicalDeviceMemoryProperties mem_props = ctx->device->physical_device.getMemoryProperties();

    uint32_t memory_type_index = find_properties(&mem_props, &mem_req, req_flags);
    buf->memory_property_flags = req_flags;

    if (memory_type_index == UINT32_MAX && fallback_flags) {
        memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
        buf->memory_property_flags = fallback_flags;
    }

    if (memory_type_index == UINT32_MAX) {
        ctx->device->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");
    }

    try {
        buf->device_memory = ctx->device->device.allocateMemory({ mem_req.size, memory_type_index });
    } catch (const vk::SystemError&) {
        // Out of Host/Device memory, clean up buffer and rethrow the original exception
        ctx->device->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw;
    }
    buf->ptr = nullptr;

    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = ctx->device->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

    ctx->device->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);

    buf->ctx = ctx;

    buf->device = ctx->device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    ctx->memory_logger.log_allocation(buf, size);
#endif

    return buf;
}
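
// Same as ggml_vk_create_buffer, but logs allocation failures before rethrowing.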
static vk_buffer ggml_vk_create_buffer_check(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(ctx, size, req_flags, fallback_flags);
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw;
    }
}
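
// Allocate a device-local buffer. On unified memory architectures (UMA),
// host-visible, host-coherent memory is accepted as a fallback.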
static vk_buffer ggml_vk_create_buffer_device(ggml_backend_vk_context * ctx, size_t size) {
    vk_buffer buf;
    try {
        if (ctx->device->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
        } else {
            buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw;
    }

    return buf;
}
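
// Drop the shared_ptr reference; the underlying vk_buffer_struct is destroyed
// once the last reference goes away.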
static void ggml_vk_destroy_buffer(vk_buffer& buf) {
    if (buf == nullptr) {
        return;
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    buf->ctx->memory_logger.log_deallocation(buf);
#endif

    buf.reset();
}

static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) {
    return { buf, 0, VK_WHOLE_SIZE };
}
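
// Record a full memory barrier (all reads and writes to all reads and writes)
// over the queue's pipeline stages, ordering buffer accesses between dispatches.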
static void ggml_vk_sync_buffers(vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_sync_buffers()");
    const std::vector<vk::MemoryBarrier> mem_barriers{ { { vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite }, { vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite } } };

    ctx->s->buffer.pipelineBarrier(
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        mem_barriers,
        {},
        {}
    );
}
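
// Make the command buffer wait for the given events; no-op for an empty list.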
static void ggml_vk_wait_events(vk_context * ctx, std::vector<vk::Event>&& events) {
    VK_LOG_DEBUG("ggml_vk_wait_events()");
    if (events.empty()) {
        return;
    }

    ctx->s->buffer.waitEvents(
        events,
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        {},
        {}
    );
}
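
// Tensor types for which dedicated shader variants exist.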
static bool ggml_vk_build_shader(ggml_type type) {
    switch (type) {
    case GGML_TYPE_F16:
    case GGML_TYPE_Q4_0:
    case GGML_TYPE_Q4_1:
    case GGML_TYPE_Q5_0:
    case GGML_TYPE_Q5_1:
    case GGML_TYPE_Q8_0:
    case GGML_TYPE_Q2_K:
    case GGML_TYPE_Q3_K:
    case GGML_TYPE_Q4_K:
    case GGML_TYPE_Q5_K:
    case GGML_TYPE_Q6_K:
        return true;
    default:
        return false;
    }
}
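
// Create all compute pipelines for this device. The warptile_* lists are
// matmul tuning parameters handed to ggml_vk_create_pipeline (tile sizes and
// subgroup-size-dependent constants; the exact meaning of each field is
// defined by the matmul shaders). The l/m/s variants are large/medium/small
// tile configurations, the *_wg_denoms name the workgroup denominators, and
// *_align are the alignment granularities for the "aligned" pipeline variants.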
static void ggml_vk_load_shaders(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_load_shaders(" << ctx->name << ")");

    const std::shared_ptr<vk_device> device = ctx->device;

    // mulmat
    std::initializer_list<uint32_t> warptile_l = { 128, 128, 128, 16, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_m = { 128,  64,  64, 16, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_s = { device->subgroup_size,  32,  32, 16, 32, 32, 2, 2, 2, device->subgroup_size };

    std::initializer_list<uint32_t> warptile_mmq_l = { 128, 128, 128, 32, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_mmq_m = { 128,  64,  64, 32, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_mmq_s = { device->subgroup_size,  32,  32, 32, 32, 32, 2, 2, 2, device->subgroup_size };

    std::array<uint32_t, 3> l_wg_denoms = {128, 128, 1 };
    std::array<uint32_t, 3> m_wg_denoms = { 64,  64, 1 };
    std::array<uint32_t, 3> s_wg_denoms = { 32,  32, 1 };

    uint32_t l_align = 128;
    uint32_t m_align =  64;
    uint32_t s_align =  32;

    ctx->device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_matmul_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_matmul_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_matmul_id_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_matmul_id_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
    ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
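
    // Pipeline variant selection: devices with fp16 support get the fp16 shader
    // builds; the else branch below loads the fp32 builds of the same shaders
    // (the *_fp32_len / *_fp32_data variants).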
  901. if (device->fp16) {
  902. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->l, "matmul_f32_l", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
  903. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->m, "matmul_f32_m", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
  904. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->s, "matmul_f32_s", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
  905. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->a_l, "matmul_f32_aligned_l", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
  906. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->a_m, "matmul_f32_aligned_m", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
  907. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->a_s, "matmul_f32_aligned_s", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
  908. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->l, "matmul_f32_f16_l", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
  909. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->m, "matmul_f32_f16_m", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
  910. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->s, "matmul_f32_f16_s", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
  911. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->a_l, "matmul_f32_f16_aligned_l", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
  912. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->a_m, "matmul_f32_f16_aligned_m", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
  913. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->a_s, "matmul_f32_f16_aligned_s", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
  914. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->l, "matmul_f16_l", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
  915. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->m, "matmul_f16_m", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
  916. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->s, "matmul_f16_s", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
  917. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->a_l, "matmul_f16_aligned_l", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
  918. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->a_m, "matmul_f16_aligned_m", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
  919. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->a_s, "matmul_f16_aligned_s", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
  920. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->l, "matmul_f16_f32_l", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
  921. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->m, "matmul_f16_f32_m", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
  922. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->s, "matmul_f16_f32_s", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
  923. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->a_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
  924. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->a_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
  925. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->a_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
  926. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->l, "matmul_q4_0_f32_l", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  927. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->m, "matmul_q4_0_f32_m", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  928. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->s, "matmul_q4_0_f32_s", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  929. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_l, "matmul_q4_0_f32_aligned_l", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  930. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_m, "matmul_q4_0_f32_aligned_m", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  931. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_s, "matmul_q4_0_f32_aligned_s", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  932. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->l, "matmul_q4_1_f32_l", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  933. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->m, "matmul_q4_1_f32_m", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  934. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->s, "matmul_q4_1_f32_s", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  935. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_l, "matmul_q4_1_f32_aligned_l", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  936. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_m, "matmul_q4_1_f32_aligned_m", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  937. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_s, "matmul_q4_1_f32_aligned_s", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  938. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->l, "matmul_q5_0_f32_l", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  939. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->m, "matmul_q5_0_f32_m", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  940. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->s, "matmul_q5_0_f32_s", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  941. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_l, "matmul_q5_0_f32_aligned_l", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  942. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_m, "matmul_q5_0_f32_aligned_m", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  943. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_s, "matmul_q5_0_f32_aligned_s", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  944. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->l, "matmul_q5_1_f32_l", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  945. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->m, "matmul_q5_1_f32_m", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  946. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->s, "matmul_q5_1_f32_s", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  947. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_l, "matmul_q5_1_f32_aligned_l", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  948. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_m, "matmul_q5_1_f32_aligned_m", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  949. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_s, "matmul_q5_1_f32_aligned_s", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  950. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->l, "matmul_q8_0_f32_l", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  951. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->m, "matmul_q8_0_f32_m", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  952. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->s, "matmul_q8_0_f32_s", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  953. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_l, "matmul_q8_0_f32_aligned_l", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  954. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_m, "matmul_q8_0_f32_aligned_m", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  955. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_s, "matmul_q8_0_f32_aligned_s", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  956. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->l, "matmul_q2_k_f32_l", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  957. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->m, "matmul_q2_k_f32_m", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  958. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->s, "matmul_q2_k_f32_s", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  959. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_l, "matmul_q2_k_f32_aligned_l", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  960. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_m, "matmul_q2_k_f32_aligned_m", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  961. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_s, "matmul_q2_k_f32_aligned_s", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  962. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->l, "matmul_q3_k_f32_l", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  963. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->m, "matmul_q3_k_f32_m", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  964. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->s, "matmul_q3_k_f32_s", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  965. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_l, "matmul_q3_k_f32_aligned_l", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  966. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_m, "matmul_q3_k_f32_aligned_m", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  967. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_s, "matmul_q3_k_f32_aligned_s", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  968. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->l, "matmul_q4_k_f32_l", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  969. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->m, "matmul_q4_k_f32_m", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  970. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->s, "matmul_q4_k_f32_s", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  971. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_l, "matmul_q4_k_f32_aligned_l", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  972. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_m, "matmul_q4_k_f32_aligned_m", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  973. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_s, "matmul_q4_k_f32_aligned_s", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  974. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->l, "matmul_q5_k_f32_l", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  975. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->m, "matmul_q5_k_f32_m", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  976. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->s, "matmul_q5_k_f32_s", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  977. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_l, "matmul_q5_k_f32_aligned_l", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  978. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_m, "matmul_q5_k_f32_aligned_m", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  979. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_s, "matmul_q5_k_f32_aligned_s", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  980. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->l, "matmul_q6_k_f32_l", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  981. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->m, "matmul_q6_k_f32_m", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  982. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->s, "matmul_q6_k_f32_s", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  983. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_l, "matmul_q6_k_f32_aligned_l", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  984. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_m, "matmul_q6_k_f32_aligned_m", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  985. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_s, "matmul_q6_k_f32_aligned_s", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  986. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->l, "matmul_id_f32_l", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
  987. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->m, "matmul_id_f32_m", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
  988. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->s, "matmul_id_f32_s", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
  989. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->a_l, "matmul_id_f32_aligned_l", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
  990. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->a_m, "matmul_id_f32_aligned_m", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
  991. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->a_s, "matmul_id_f32_aligned_s", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
  992. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->l, "matmul_id_f16_l", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
  993. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->m, "matmul_id_f16_m", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
  994. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->s, "matmul_id_f16_s", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
  995. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->a_l, "matmul_id_f16_aligned_l", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
  996. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->a_m, "matmul_id_f16_aligned_m", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
  997. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->a_s, "matmul_id_f16_aligned_s", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
  998. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->l, "matmul_id_f16_f32_l", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
  999. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->m, "matmul_id_f16_f32_m", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
  1000. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->s, "matmul_id_f16_f32_s", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
  1001. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->a_l, "matmul_id_f16_f32_aligned_l", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
  1002. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->a_m, "matmul_id_f16_f32_aligned_m", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
  1003. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->a_s, "matmul_id_f16_f32_aligned_s", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
  1004. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->l, "matmul_id_q4_0_f32_l", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1005. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->m, "matmul_id_q4_0_f32_m", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1006. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->s, "matmul_id_q4_0_f32_s", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1007. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_l, "matmul_id_q4_0_f32_aligned_l", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1008. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_m, "matmul_id_q4_0_f32_aligned_m", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1009. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_s, "matmul_id_q4_0_f32_aligned_s", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1010. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->l, "matmul_id_q4_1_f32_l", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1011. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->m, "matmul_id_q4_1_f32_m", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1012. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->s, "matmul_id_q4_1_f32_s", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1013. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_l, "matmul_id_q4_1_f32_aligned_l", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1014. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_m, "matmul_id_q4_1_f32_aligned_m", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1015. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_s, "matmul_id_q4_1_f32_aligned_s", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1016. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->l, "matmul_id_q5_0_f32_l", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1017. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->m, "matmul_id_q5_0_f32_m", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1018. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->s, "matmul_id_q5_0_f32_s", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1019. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_l, "matmul_id_q5_0_f32_aligned_l", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1020. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_m, "matmul_id_q5_0_f32_aligned_m", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1021. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_s, "matmul_id_q5_0_f32_aligned_s", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1022. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->l, "matmul_id_q5_1_f32_l", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1023. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->m, "matmul_id_q5_1_f32_m", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1024. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->s, "matmul_id_q5_1_f32_s", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1025. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_l, "matmul_id_q5_1_f32_aligned_l", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1026. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_m, "matmul_id_q5_1_f32_aligned_m", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1027. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_s, "matmul_id_q5_1_f32_aligned_s", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1028. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->l, "matmul_id_q8_0_f32_l", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1029. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->m, "matmul_id_q8_0_f32_m", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1030. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->s, "matmul_id_q8_0_f32_s", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1031. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_l, "matmul_id_q8_0_f32_aligned_l", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1032. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_m, "matmul_id_q8_0_f32_aligned_m", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1033. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_s, "matmul_id_q8_0_f32_aligned_s", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1034. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->l, "matmul_id_q2_k_f32_l", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1035. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->m, "matmul_id_q2_k_f32_m", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1036. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->s, "matmul_id_q2_k_f32_s", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1037. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_l, "matmul_id_q2_k_f32_aligned_l", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1038. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_m, "matmul_id_q2_k_f32_aligned_m", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1039. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_s, "matmul_id_q2_k_f32_aligned_s", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1040. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->l, "matmul_id_q3_k_f32_l", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1041. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->m, "matmul_id_q3_k_f32_m", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1042. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->s, "matmul_id_q3_k_f32_s", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1043. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_l, "matmul_id_q3_k_f32_aligned_l", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1044. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_m, "matmul_id_q3_k_f32_aligned_m", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1045. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_s, "matmul_id_q3_k_f32_aligned_s", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1046. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->l, "matmul_id_q4_k_f32_l", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1047. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->m, "matmul_id_q4_k_f32_m", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1048. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->s, "matmul_id_q4_k_f32_s", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1049. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_l, "matmul_id_q4_k_f32_aligned_l", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1050. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_m, "matmul_id_q4_k_f32_aligned_m", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1051. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_s, "matmul_id_q4_k_f32_aligned_s", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1052. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->l, "matmul_id_q5_k_f32_l", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1053. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->m, "matmul_id_q5_k_f32_m", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1054. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->s, "matmul_id_q5_k_f32_s", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1055. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_l, "matmul_id_q5_k_f32_aligned_l", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1056. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_m, "matmul_id_q5_k_f32_aligned_m", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1057. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_s, "matmul_id_q5_k_f32_aligned_s", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1058. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->l, "matmul_id_q6_k_f32_l", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1059. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->m, "matmul_id_q6_k_f32_m", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1060. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->s, "matmul_id_q6_k_f32_s", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1061. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_l, "matmul_id_q6_k_f32_aligned_l", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1062. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_m, "matmul_id_q6_k_f32_aligned_m", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1063. ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_s, "matmul_id_q6_k_f32_aligned_s", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1064. } else {
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->l, "matmul_f32_l", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->m, "matmul_f32_m", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->s, "matmul_f32_s", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->a_l, "matmul_f32_aligned_l", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->a_m, "matmul_f32_aligned_m", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32->a_s, "matmul_f32_aligned_s", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->l, "matmul_f32_f16_l", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->m, "matmul_f32_f16_m", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->s, "matmul_f32_f16_s", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->a_l, "matmul_f32_f16_aligned_l", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->a_m, "matmul_f32_f16_aligned_m", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f32_f16->a_s, "matmul_f32_f16_aligned_s", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->l, "matmul_f16_l", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->m, "matmul_f16_m", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->s, "matmul_f16_s", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->a_l, "matmul_f16_aligned_l", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->a_m, "matmul_f16_aligned_m", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16->a_s, "matmul_f16_aligned_s", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->l, "matmul_f16_f32_l", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->m, "matmul_f16_f32_m", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->s, "matmul_f16_f32_s", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->a_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->a_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_f16_f32->a_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
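        // Quantized-A matmul pipelines (dequantize-and-multiply), fp32 variants.
        // These switch to the mmq warptiles, presumably tuned for the extra work of
        // in-shader dequantization.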
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->l, "matmul_q4_0_f32_l", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->m, "matmul_q4_0_f32_m", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->s, "matmul_q4_0_f32_s", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_l, "matmul_q4_0_f32_aligned_l", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_m, "matmul_q4_0_f32_aligned_m", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_s, "matmul_q4_0_f32_aligned_s", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->l, "matmul_q4_1_f32_l", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->m, "matmul_q4_1_f32_m", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->s, "matmul_q4_1_f32_s", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_l, "matmul_q4_1_f32_aligned_l", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_m, "matmul_q4_1_f32_aligned_m", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_s, "matmul_q4_1_f32_aligned_s", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->l, "matmul_q5_0_f32_l", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->m, "matmul_q5_0_f32_m", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->s, "matmul_q5_0_f32_s", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_l, "matmul_q5_0_f32_aligned_l", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_m, "matmul_q5_0_f32_aligned_m", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_s, "matmul_q5_0_f32_aligned_s", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->l, "matmul_q5_1_f32_l", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->m, "matmul_q5_1_f32_m", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->s, "matmul_q5_1_f32_s", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_l, "matmul_q5_1_f32_aligned_l", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_m, "matmul_q5_1_f32_aligned_m", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_s, "matmul_q5_1_f32_aligned_s", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->l, "matmul_q8_0_f32_l", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->m, "matmul_q8_0_f32_m", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->s, "matmul_q8_0_f32_s", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_l, "matmul_q8_0_f32_aligned_l", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_m, "matmul_q8_0_f32_aligned_m", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_s, "matmul_q8_0_f32_aligned_s", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->l, "matmul_q2_k_f32_l", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->m, "matmul_q2_k_f32_m", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->s, "matmul_q2_k_f32_s", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_l, "matmul_q2_k_f32_aligned_l", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_m, "matmul_q2_k_f32_aligned_m", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_s, "matmul_q2_k_f32_aligned_s", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->l, "matmul_q3_k_f32_l", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->m, "matmul_q3_k_f32_m", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->s, "matmul_q3_k_f32_s", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_l, "matmul_q3_k_f32_aligned_l", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_m, "matmul_q3_k_f32_aligned_m", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_s, "matmul_q3_k_f32_aligned_s", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->l, "matmul_q4_k_f32_l", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->m, "matmul_q4_k_f32_m", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->s, "matmul_q4_k_f32_s", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_l, "matmul_q4_k_f32_aligned_l", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_m, "matmul_q4_k_f32_aligned_m", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_s, "matmul_q4_k_f32_aligned_s", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->l, "matmul_q5_k_f32_l", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->m, "matmul_q5_k_f32_m", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->s, "matmul_q5_k_f32_s", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_l, "matmul_q5_k_f32_aligned_l", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_m, "matmul_q5_k_f32_aligned_m", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_s, "matmul_q5_k_f32_aligned_s", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->l, "matmul_q6_k_f32_l", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->m, "matmul_q6_k_f32_m", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->s, "matmul_q6_k_f32_s", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_l, "matmul_q6_k_f32_aligned_l", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_m, "matmul_q6_k_f32_aligned_m", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_s, "matmul_q6_k_f32_aligned_s", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
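        // Indirect (mul_mat_id) matmul pipelines, fp32 variants: a fourth binding
        // carries the row-id tensor and the larger vk_mat_mat_id_push_constants
        // block is used, matching the extra ids input of GGML_OP_MUL_MAT_ID.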
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->l, "matmul_id_f32_l", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->m, "matmul_id_f32_m", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->s, "matmul_id_f32_s", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->a_l, "matmul_id_f32_aligned_l", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->a_m, "matmul_id_f32_aligned_m", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f32->a_s, "matmul_id_f32_aligned_s", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->l, "matmul_id_f16_l", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->m, "matmul_id_f16_m", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->s, "matmul_id_f16_s", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->a_l, "matmul_id_f16_aligned_l", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->a_m, "matmul_id_f16_aligned_m", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16->a_s, "matmul_id_f16_aligned_s", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->l, "matmul_id_f16_f32_l", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->m, "matmul_id_f16_f32_m", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->s, "matmul_id_f16_f32_s", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->a_l, "matmul_id_f16_f32_aligned_l", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->a_m, "matmul_id_f16_f32_aligned_m", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_id_f16_f32->a_s, "matmul_id_f16_f32_aligned_s", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->l, "matmul_id_q4_0_f32_l", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->m, "matmul_id_q4_0_f32_m", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->s, "matmul_id_q4_0_f32_s", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_l, "matmul_id_q4_0_f32_aligned_l", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_m, "matmul_id_q4_0_f32_aligned_m", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_s, "matmul_id_q4_0_f32_aligned_s", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->l, "matmul_id_q4_1_f32_l", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->m, "matmul_id_q4_1_f32_m", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->s, "matmul_id_q4_1_f32_s", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_l, "matmul_id_q4_1_f32_aligned_l", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_m, "matmul_id_q4_1_f32_aligned_m", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_s, "matmul_id_q4_1_f32_aligned_s", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->l, "matmul_id_q5_0_f32_l", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->m, "matmul_id_q5_0_f32_m", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->s, "matmul_id_q5_0_f32_s", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_l, "matmul_id_q5_0_f32_aligned_l", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_m, "matmul_id_q5_0_f32_aligned_m", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_s, "matmul_id_q5_0_f32_aligned_s", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->l, "matmul_id_q5_1_f32_l", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->m, "matmul_id_q5_1_f32_m", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->s, "matmul_id_q5_1_f32_s", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_l, "matmul_id_q5_1_f32_aligned_l", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_m, "matmul_id_q5_1_f32_aligned_m", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_s, "matmul_id_q5_1_f32_aligned_s", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->l, "matmul_id_q8_0_f32_l", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->m, "matmul_id_q8_0_f32_m", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->s, "matmul_id_q8_0_f32_s", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_l, "matmul_id_q8_0_f32_aligned_l", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_m, "matmul_id_q8_0_f32_aligned_m", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_s, "matmul_id_q8_0_f32_aligned_s", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->l, "matmul_id_q2_k_f32_l", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->m, "matmul_id_q2_k_f32_m", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->s, "matmul_id_q2_k_f32_s", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_l, "matmul_id_q2_k_f32_aligned_l", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_m, "matmul_id_q2_k_f32_aligned_m", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_s, "matmul_id_q2_k_f32_aligned_s", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->l, "matmul_id_q3_k_f32_l", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->m, "matmul_id_q3_k_f32_m", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->s, "matmul_id_q3_k_f32_s", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_l, "matmul_id_q3_k_f32_aligned_l", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_m, "matmul_id_q3_k_f32_aligned_m", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_s, "matmul_id_q3_k_f32_aligned_s", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->l, "matmul_id_q4_k_f32_l", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->m, "matmul_id_q4_k_f32_m", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->s, "matmul_id_q4_k_f32_s", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_l, "matmul_id_q4_k_f32_aligned_l", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_m, "matmul_id_q4_k_f32_aligned_m", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_s, "matmul_id_q4_k_f32_aligned_s", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->l, "matmul_id_q5_k_f32_l", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->m, "matmul_id_q5_k_f32_m", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->s, "matmul_id_q5_k_f32_s", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_l, "matmul_id_q5_k_f32_aligned_l", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_m, "matmul_id_q5_k_f32_aligned_m", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_s, "matmul_id_q5_k_f32_aligned_s", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->l, "matmul_id_q6_k_f32_l", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->m, "matmul_id_q6_k_f32_m", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->s, "matmul_id_q6_k_f32_s", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_l, "matmul_id_q6_k_f32_aligned_l", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_m, "matmul_id_q6_k_f32_aligned_m", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_s, "matmul_id_q6_k_f32_aligned_s", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
    }
    // mul mat vec
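    // One pipeline per src0 type. A wg_denoms of {1, 1, 1} applies no coarsening to
    // the dispatch grid, and the single specialization constant forwards the
    // device's subgroup size to the shader (how the shader consumes it is not
    // visible at this call site).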
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
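    // _id variants: same per-type table, plus a fourth binding for the row ids and
    // the vk_mat_vec_id_push_constants layout.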
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    // dequant shaders
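    // Each dequant pipeline expands one quantized type to f16; the F32 entry is a
    // plain f32 -> f16 cast, hence its "f32_to_f16" name. The first wg_denoms
    // component (256 * 16/32/64) sets how many values one workgroup covers, and the
    // per-type factor appears to track the format's block layout.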
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_k", dequant_q2_k_len, dequant_q2_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_k", dequant_q3_k_len, dequant_q3_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    // get_rows
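    // Row-gather (get_rows) pipelines: one table per destination convention, with
    // pipeline_get_rows_f32 below writing f32 output. Quantized source types use a
    // 1024 element denominator versus 512 for the float types.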
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_mul_f32, "mul_f32", mul_f32_len, mul_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_div_f32, "div_f32", div_f32_len, div_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_gelu_f32, "gelu_f32", gelu_f32_len, gelu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_silu_f32, "silu_f32", silu_f32_len, silu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_relu_f32, "relu_f32", relu_f32_len, relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);

    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(ctx, ctx->device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { ctx->device->subgroup_size }, 1);
}
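
// Reading aid for the ggml_vk_create_pipeline() calls above (the annotations
// are a best-effort interpretation of the argument list, not upstream docs).
// Taking the add_f32 pipeline as an example:
//
//     ggml_vk_create_pipeline(ctx, ctx->device->pipeline_add_f32, "add_f32",
//         add_f32_len, add_f32_data,           // embedded SPIR-V blob (size, data)
//         "main",                              // shader entry point
//         3,                                   // parameter_count: src0, src1, dst descriptors
//         sizeof(vk_op_binary_push_constants), // push constant block size
//         {512, 1, 1},                         // wg_denoms: elements covered per workgroup
//         {},                                  // specialization constants
//         1);                                  // align (see ggml_vk_guess_matmul_pipeline_align)
//
// ggml_vk_dispatch_pipeline() below divides the dispatched element counts by
// wg_denoms to obtain the workgroup counts, and asserts that exactly
// parameter_count buffers are bound.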
static void ggml_vk_print_gpu_info(size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
    VK_LOG_DEBUG("ggml_vk_print_gpu_info(" << dev_num << ")");
    GGML_ASSERT(vk_instance.initialized);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    if (dev_num >= devices.size()) {
        std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
        throw std::runtime_error("Device not found");
    }

    vk::PhysicalDevice physical_device = devices[dev_num];
    std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();

    vk::PhysicalDeviceProperties2 props2;
    vk::PhysicalDeviceMaintenance3Properties props3;
    vk::PhysicalDeviceSubgroupProperties subgroup_props;
    vk::PhysicalDeviceDriverProperties driver_props;
    props2.pNext = &props3;
    props3.pNext = &subgroup_props;
    subgroup_props.pNext = &driver_props;
    physical_device.getProperties2(&props2);

    const size_t subgroup_size = subgroup_props.subgroupSize;
    const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

    bool fp16_storage = false;
    bool fp16_compute = false;

    for (auto properties : ext_props) {
        if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
            fp16_storage = true;
        } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
            fp16_compute = true;
        }
    }

    const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
    bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

    bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

    vk::PhysicalDeviceFeatures device_features = physical_device.getFeatures();

    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    device_features2.pNext = nullptr;
    device_features2.features = (VkPhysicalDeviceFeatures)device_features;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    VkPhysicalDeviceVulkan12Features vk12_features;
    vk12_features.pNext = nullptr;
    vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
    vk11_features.pNext = &vk12_features;
    vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);

    fp16 = fp16 && vk12_features.shaderFloat16;

    std::string device_name = props2.properties.deviceName.data();
    std::cerr << GGML_VK_NAME << idx << ": " << device_name << " (" << driver_props.driverName << ") | uma: " << uma << " | fp16: " << fp16 << " | warp size: " << subgroup_size << std::endl;

    if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
        std::cerr << "ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want." << std::endl;
    }
}
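
// Note on the property queries above: props2 -> props3 -> subgroup_props ->
// driver_props is a standard Vulkan pNext chain; a single getProperties2()
// call fills every struct linked into the chain. A minimal stand-alone sketch
// of the same idiom:
//
//     vk::PhysicalDeviceProperties2 props2;
//     vk::PhysicalDeviceDriverProperties driver_props;
//     props2.pNext = &driver_props;             // link the extension struct
//     physical_device.getProperties2(&props2);  // fills props2 and driver_props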
static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
void ggml_vk_instance_init() {
    if (vk_instance_initialized) {
        return;
    }
    VK_LOG_DEBUG("ggml_vk_instance_init()");

    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION };

    const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
    const bool validation_ext = ggml_vk_instance_validation_ext_available(instance_extensions);
#ifdef __APPLE__
    const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
#endif

    std::vector<const char*> layers;
    if (validation_ext) {
        layers.push_back("VK_LAYER_KHRONOS_validation");
    }
    std::vector<const char*> extensions;
    if (validation_ext) {
        extensions.push_back("VK_EXT_validation_features");
    }
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        extensions.push_back("VK_KHR_portability_enumeration");
    }
#endif
    vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
    }
#endif

    std::vector<vk::ValidationFeatureEnableEXT> features_enable;
    vk::ValidationFeaturesEXT validation_features;

    if (validation_ext) {
        features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
        validation_features = {
            features_enable,
            {},
        };
        validation_features.setPNext(nullptr);
        instance_create_info.setPNext(&validation_features);
        std::cerr << "ggml_vulkan: Validation layers enabled" << std::endl;
    }
    vk_instance.instance = vk::createInstance(instance_create_info);

    memset(vk_instance.initialized, 0, sizeof(bool) * GGML_VK_MAX_DEVICES);

    size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size();

    // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
    char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
    if (devices_env != nullptr) {
        std::string devices(devices_env);
        std::replace(devices.begin(), devices.end(), ',', ' ');

        std::stringstream ss(devices);
        size_t tmp;
        while (ss >> tmp) {
            if (tmp >= num_available_devices) {
                std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
                throw std::runtime_error("Invalid Vulkan device index");
            }
            vk_instance.device_indices.push_back(tmp);
        }
    } else {
        std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

        // Make sure at least one device exists
        if (devices.empty()) {
            std::cerr << "ggml_vulkan: Error: No devices found." << std::endl;
            GGML_ASSERT(false);
        }

        // Default to using all dedicated GPUs
        for (size_t i = 0; i < devices.size(); i++) {
            vk::PhysicalDeviceProperties2 new_props;
            vk::PhysicalDeviceDriverProperties new_driver;
            vk::PhysicalDeviceIDProperties new_id;
            new_props.pNext = &new_driver;
            new_driver.pNext = &new_id;
            devices[i].getProperties2(&new_props);

            if (new_props.properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu) {
                // Check if there are two physical devices corresponding to the same GPU
                auto old_device = std::find_if(
                    vk_instance.device_indices.begin(),
                    vk_instance.device_indices.end(),
                    [&devices, &new_id](const size_t k){
                        vk::PhysicalDeviceProperties2 old_props;
                        vk::PhysicalDeviceIDProperties old_id;
                        old_props.pNext = &old_id;
                        devices[k].getProperties2(&old_props);
                        return std::equal(std::begin(old_id.deviceUUID), std::end(old_id.deviceUUID), std::begin(new_id.deviceUUID));
                    }
                );
                if (old_device == vk_instance.device_indices.end()) {
                    vk_instance.device_indices.push_back(i);
                } else {
                    // There can be two physical devices corresponding to the same GPU if there are two different drivers.
                    // This can cause errors when splitting layers across the devices, so keep only one of them.
                    VK_LOG_DEBUG("Device " << i << " and device " << *old_device << " have the same deviceUUID");

                    vk::PhysicalDeviceProperties2 old_props;
                    vk::PhysicalDeviceDriverProperties old_driver;
                    old_props.pNext = &old_driver;
                    devices[*old_device].getProperties2(&old_props);

                    std::map<vk::DriverId, int> driver_priorities {};
                    int old_priority = std::numeric_limits<int>::max();
                    int new_priority = std::numeric_limits<int>::max();

                    // Check https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDriverId.html for the list of driver ids
                    // Smaller number -> higher priority
                    switch (old_props.properties.vendorID) {
                        case VK_VENDOR_ID_AMD:
                            driver_priorities[vk::DriverId::eMesaRadv] = 1;
                            driver_priorities[vk::DriverId::eAmdOpenSource] = 2;
                            driver_priorities[vk::DriverId::eAmdProprietary] = 3;
                            break;
                        case VK_VENDOR_ID_INTEL:
                            driver_priorities[vk::DriverId::eIntelOpenSourceMESA] = 1;
                            driver_priorities[vk::DriverId::eIntelProprietaryWindows] = 2;
                            break;
                        case VK_VENDOR_ID_NVIDIA:
                            driver_priorities[vk::DriverId::eNvidiaProprietary] = 1;
#if defined(VK_API_VERSION_1_3) && VK_HEADER_VERSION >= 235
                            driver_priorities[vk::DriverId::eMesaNvk] = 2;
#endif
                            break;
                    }

                    if (driver_priorities.count(old_driver.driverID)) {
                        old_priority = driver_priorities[old_driver.driverID];
                    }
                    if (driver_priorities.count(new_driver.driverID)) {
                        new_priority = driver_priorities[new_driver.driverID];
                    }

                    if (new_priority < old_priority) {
                        auto r = std::remove(vk_instance.device_indices.begin(), vk_instance.device_indices.end(), *old_device);
                        vk_instance.device_indices.erase(r, vk_instance.device_indices.end());
                        vk_instance.device_indices.push_back(i);

                        VK_LOG_DEBUG("Prioritize device " << i << " driver " << new_driver.driverName << " over device " << *old_device << " driver " << old_driver.driverName);
                    } else {
                        VK_LOG_DEBUG("Prioritize device " << *old_device << " driver " << old_driver.driverName << " over device " << i << " driver " << new_driver.driverName);
                    }
                }
            }
        }

        // If no dedicated GPUs found, fall back to GPU 0
        if (vk_instance.device_indices.empty()) {
            vk_instance.device_indices.push_back(0);
        }
    }
    std::cerr << "ggml_vulkan: Found " << vk_instance.device_indices.size() << " Vulkan devices:" << std::endl;

    for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
        ggml_vk_print_gpu_info(i);
    }

    vk_instance_initialized = true;
}
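
// Usage sketch for the GGML_VK_VISIBLE_DEVICES handling above, mirroring the
// CUDA_VISIBLE_DEVICES convention it emulates: restrict ggml to the first and
// third Vulkan device from the shell, e.g.
//
//     GGML_VK_VISIBLE_DEVICES=0,2 ./main ...
//
// Each index must be smaller than the count reported by
// enumeratePhysicalDevices(), otherwise instance init throws.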
static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
    VK_LOG_DEBUG("ggml_vk_init(" << ctx->name << ", " << dev_num << ")");
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    if (dev_num >= devices.size()) {
        std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
        throw std::runtime_error("Device not found");
    }

    ctx->device = ggml_vk_get_device(idx);
    if (!ctx->device->initialized) {
        ctx->device->physical_device = devices[dev_num];
        const std::vector<vk::ExtensionProperties> ext_props = ctx->device->physical_device.enumerateDeviceExtensionProperties();

        bool maintenance4_support = false;

        // Check if maintenance4 is supported
        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
                maintenance4_support = true;
            }
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceMaintenance3Properties props3;
        vk::PhysicalDeviceMaintenance4Properties props4;
        vk::PhysicalDeviceSubgroupProperties subgroup_props;
        props2.pNext = &props3;
        props3.pNext = &subgroup_props;
        if (maintenance4_support) {
            subgroup_props.pNext = &props4;
        }
        ctx->device->physical_device.getProperties2(&props2);
        ctx->device->properties = props2.properties;
        const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");

        if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) {
            // stoul instead of stoi: allocation sizes above INT_MAX (2 GiB) must not overflow
            ctx->device->max_memory_allocation_size = std::stoul(GGML_VK_FORCE_MAX_ALLOCATION_SIZE);
        } else if (maintenance4_support) {
            ctx->device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
        } else {
            ctx->device->max_memory_allocation_size = props3.maxMemoryAllocationSize;
        }
        ctx->device->vendor_id = ctx->device->properties.vendorID;
        ctx->device->subgroup_size = subgroup_props.subgroupSize;
        ctx->device->uma = ctx->device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

        bool fp16_storage = false;
        bool fp16_compute = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
                fp16_storage = true;
            } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
                fp16_compute = true;
            }
        }

        const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
        const bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

        ctx->device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

        std::vector<vk::QueueFamilyProperties> queue_family_props = ctx->device->physical_device.getQueueFamilyProperties();

        // Try to find a non-graphics compute queue and transfer-focused queues
        const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
        const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);

        const float priorities[] = { 1.0f, 1.0f };
        ctx->device->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;

        std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
        if (compute_queue_family_index != transfer_queue_family_index) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
        } else if (!ctx->device->single_queue) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
        } else {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
        }
        vk::DeviceCreateInfo device_create_info;
        std::vector<const char *> device_extensions;
        vk::PhysicalDeviceFeatures device_features = ctx->device->physical_device.getFeatures();

        VkPhysicalDeviceFeatures2 device_features2;
        device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        device_features2.pNext = nullptr;
        device_features2.features = (VkPhysicalDeviceFeatures)device_features;

        VkPhysicalDeviceVulkan11Features vk11_features;
        vk11_features.pNext = nullptr;
        vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
        device_features2.pNext = &vk11_features;

        VkPhysicalDeviceVulkan12Features vk12_features;
        vk12_features.pNext = nullptr;
        vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
        vk11_features.pNext = &vk12_features;
        vkGetPhysicalDeviceFeatures2(ctx->device->physical_device, &device_features2);

        ctx->device->fp16 = ctx->device->fp16 && vk12_features.shaderFloat16;

        if (!vk11_features.storageBuffer16BitAccess) {
            std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
            throw std::runtime_error("Unsupported device");
        }

        device_extensions.push_back("VK_KHR_16bit_storage");

#ifdef GGML_VULKAN_VALIDATE
        device_extensions.push_back("VK_KHR_shader_non_semantic_info");
#endif

        if (ctx->device->fp16) {
            device_extensions.push_back("VK_KHR_shader_float16_int8");
        }
        ctx->device->name = ctx->device->properties.deviceName.data();

        device_create_info = {
            vk::DeviceCreateFlags(),
            device_queue_create_infos,
            {},
            device_extensions
        };
        device_create_info.setPNext(&device_features2);
        ctx->device->device = ctx->device->physical_device.createDevice(device_create_info);

        ctx->device->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN;

        // Queues
        ggml_vk_create_queue(ctx, ctx->device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer });

        // Shaders
        ggml_vk_load_shaders(ctx);

        if (!ctx->device->single_queue) {
            const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
            ggml_vk_create_queue(ctx, ctx->device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer });
        } else {
            // TODO: Use pointer or reference to avoid copy
            ctx->device->transfer_queue = ctx->device->compute_queue;
        }

        ctx->device->idx = dev_num;
        ctx->device->initialized = true;
    } else if (ctx->device->idx != dev_num) {
        std::cerr << "ggml_vulkan: Device " << ctx->device->name << " already initialized with index " << ctx->device->idx << ", but trying to reinitialize with index " << dev_num << std::endl;
        throw std::runtime_error("Device already initialized");
    }

    ctx->fence = ctx->device->device.createFence({});

    ctx->compute_ctx = nullptr;
    ctx->transfer_ctx = nullptr;

    ctx->initialized = true;

    ctx->idx = idx;

#ifdef GGML_VULKAN_CHECK_RESULTS
    const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
    vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
    const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
    vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
#endif
}
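
// Environment knobs read during device init, summarized (values illustrative):
//
//     GGML_VK_DISABLE_F16=1                        // presence alone disables fp16 shaders
//     GGML_VK_FORCE_MAX_ALLOCATION_SIZE=2147483648 // override max allocation size, in bytes
//
// Both are plain getenv() checks above; GGML_VK_DISABLE_F16 is only tested for
// being set, its value is ignored.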
static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
    VK_LOG_DEBUG("ggml_vk_get_to_fp16()");
    switch (type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant[type];
}
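
// Illustrative call site for the selector above (a sketch, not code used
// elsewhere in this file):
//
//     vk_pipeline to_fp16 = ggml_vk_get_to_fp16(ctx, tensor->type);
//     if (to_fp16 == nullptr) {
//         // no dequant shader for this type; the caller must take another path
//     }
//
// All of the pipeline-selection helpers below follow the same convention:
// nullptr means "unsupported type combination".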
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline()");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f32;
    }
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f32_f16;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f16_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f16;
    }

    GGML_ASSERT(src1_type == GGML_TYPE_F32);

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_mat[src0_type];
}
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()");
    GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            break;
        default:
            return nullptr;
    }

    return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type];
}
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f16_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_id_f16;
    }

    GGML_ASSERT(src1_type == GGML_TYPE_F32);

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type];
}
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec_id()");
    GGML_ASSERT(b_type == GGML_TYPE_F32);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_vec_id_f32[a_type];
}
static vk_buffer ggml_vk_pool_malloc(ggml_backend_vk_context * ctx, size_t size) {
    VK_LOG_DEBUG("ggml_vk_pool_malloc(" << size << ")");
    VK_LOG_MEMORY("ggml_vk_pool_malloc");

    int best_i = -1;
    size_t best_size = std::numeric_limits<size_t>::max(); // smallest unused buffer that fits our needs
    int worst_i = -1;
    size_t worst_size = 0; // largest unused buffer seen so far

    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
        vk_buffer &b = ctx->buffer_pool[i];
        if (b != nullptr && b->size >= size && b->size < best_size) {
            best_i = i;
            best_size = b->size;
        }
        if (b != nullptr && b->size > worst_size) {
            worst_i = i;
            worst_size = b->size;
        }
    }
    if (best_i != -1) {
        // found the smallest buffer that fits our needs
        vk_buffer b = ctx->buffer_pool[best_i];
        ctx->buffer_pool[best_i].reset();
        return b;
    }
    if (worst_i != -1) {
        // no pooled buffer fits; free the largest one to limit pool memory before allocating a new buffer
        vk_buffer& b = ctx->buffer_pool[worst_i];
        ggml_vk_destroy_buffer(b);
    }

    return ggml_vk_create_buffer_device(ctx, size);
}
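
// Worked example of the pool policy above (hypothetical sizes): with pooled
// buffers of 1 MiB, 4 MiB and 16 MiB and a request for 2 MiB, best_i selects
// the 4 MiB buffer (the smallest that fits). For a 32 MiB request nothing
// fits, so worst_i frees the 16 MiB buffer before a fresh 32 MiB device
// allocation, which keeps the total memory held by the pool bounded.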
static void ggml_vk_pool_free(ggml_backend_vk_context * ctx, vk_buffer& buffer) {
    VK_LOG_DEBUG("ggml_vk_pool_free(" << buffer->size << ")");
    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
        vk_buffer& b = ctx->buffer_pool[i];
        if (b == nullptr) {
            b = buffer;
            return;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: vk buffer pool full, increase MAX_VK_BUFFERS" << std::endl;
    ggml_vk_destroy_buffer(buffer);
}
// Returns an available temporary buffer; it may only be used temporarily, because it will be reused later
static vk_buffer ggml_vk_create_buffer_temp(ggml_backend_vk_context * ctx, size_t size) {
    // Try to find an existing temp buffer with enough capacity
    for (auto& buffer : ctx->gc.temp_buffers) {
        if (buffer->size >= size) {
            return buffer;
        }
    }

    VK_LOG_MEMORY("ggml_vk_create_buffer_temp(" << size << ")");

    // Otherwise create a new buffer
    vk_buffer buf = ggml_vk_pool_malloc(ctx, size);
    ctx->gc.temp_buffers.push_back(buf);

    return buf;
}
static void * ggml_vk_host_malloc(ggml_backend_vk_context * ctx, size_t size) {
    VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")");
    vk_buffer buf = ggml_vk_create_buffer(ctx, size,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);

    if (!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
            size/1024.0/1024.0);
        ctx->device->device.freeMemory(buf->device_memory);
        ctx->device->device.destroyBuffer(buf->buffer);
        return nullptr;
    }

    ctx->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));

    return buf->ptr;
}
static void ggml_vk_host_free(ggml_backend_vk_context * ctx, void* ptr) {
    if (ptr == nullptr) {
        return;
    }
    VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
    vk_buffer buf;
    size_t index;
    for (size_t i = 0; i < ctx->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(ctx->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(ctx->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(ctx->pinned_memory[i]);
            index = i;
            break;
        }
    }
    if (buf == nullptr) {
        fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
        return;
    }

    ggml_vk_destroy_buffer(buf);

    ctx->pinned_memory.erase(ctx->pinned_memory.begin() + index);
}
static void ggml_vk_host_get(ggml_backend_vk_context * ctx, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
    buf = nullptr;
    buf_offset = 0;
    for (size_t i = 0; i < ctx->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(ctx->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(ctx->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(ctx->pinned_memory[i]);
            buf_offset = ((const uint8_t *)ptr) - addr;
            break;
        }
    }
}
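
// How the pinned-memory bookkeeping fits together (illustrative): the
// ggml_vk_host_malloc() above registers (ptr, size, buffer) tuples, and the
// async copy paths call ggml_vk_host_get() to test whether a host pointer
// falls inside a registered range:
//
//     vk_buffer buf;
//     size_t buf_offset;
//     ggml_vk_host_get(ctx, tensor->data, buf, buf_offset);
//     if (buf != nullptr) {
//         // pinned: the GPU can copy directly from buf at buf_offset,
//         // no staging buffer needed
//     }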
static vk_submission ggml_vk_begin_submission(ggml_backend_vk_context * ctx, vk_queue& q, bool one_time = true) {
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(ctx, q);
    if (one_time) {
        s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
    } else {
        s.buffer.begin({ vk::CommandBufferUsageFlags{} });
    }

    return s;
}
static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline& pipeline, std::vector<vk_subbuffer>&& buffers, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
    const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
    const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
    const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
    VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
    for (auto& buffer : buffers) {
        std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.size << "), ";
    }
    std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
    std::vector<vk::DescriptorBufferInfo> descriptor_buffer_infos;
    std::vector<vk::WriteDescriptorSet> write_descriptor_sets;
    GGML_ASSERT(pipeline->descriptor_set_idx < pipeline->descriptor_sets.size());
    GGML_ASSERT(buffers.size() == pipeline->parameter_count);
    vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++];
    for (uint32_t i = 0; i < pipeline->parameter_count; i++) {
        descriptor_buffer_infos.push_back({buffers[i].buffer->buffer, buffers[i].offset, buffers[i].size});
    }
    for (uint32_t i = 0; i < pipeline->parameter_count; i++) {
        write_descriptor_sets.push_back({descriptor_set, i, 0, 1, vk::DescriptorType::eStorageBuffer, nullptr, &descriptor_buffer_infos[i]});
    }

    ctx->device->device.updateDescriptorSets(write_descriptor_sets, {});

    subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
    subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
    subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
                                         pipeline->layout,
                                         0,
                                         { descriptor_set },
                                         {});
    subctx->s->buffer.dispatch(wg0, wg1, wg2);
}
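
// Worked example for the workgroup math above: a pipeline created with
// wg_denoms = {512, 1, 1}, dispatched over elements = {1300, 1, 1}, yields
// wg0 = CEIL_DIV(1300, 512) = 3 workgroups; the shader is expected to
// bounds-check the overhang in the last group.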
static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    s.buffer.end();

    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
}

static void ggml_vk_ctx_end(vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")");
    if (ctx->s == nullptr) {
        return;
    }

    ctx->s->buffer.end();
    ctx->s = nullptr;
}
static void ggml_vk_ctx_begin(ggml_backend_vk_context * ctx, vk_context * subctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_begin(" << ctx << ")");
    if (subctx->s != nullptr) {
        ggml_vk_ctx_end(subctx);
    }

    subctx->seqs.push_back({ ggml_vk_begin_submission(ctx, *subctx->q) });
    subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
}
static size_t ggml_vk_align_size(size_t width, size_t align) {
    VK_LOG_DEBUG("ggml_vk_align_size(" << width << ", " << align << ")");
    return CEIL_DIV(width, align) * align;
}
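
// Example: ggml_vk_align_size(100, 64) = CEIL_DIV(100, 64) * 64 = 2 * 64 = 128.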
static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
    if (memcpys == nullptr) {
        memcpy(dst, src, size);
    } else {
        memcpys->emplace_back(dst, src, size);
    }
}
static void ggml_vk_ensure_sync_staging_buffer(ggml_backend_vk_context * ctx, size_t size) {
    if (ctx->sync_staging == nullptr || ctx->sync_staging->size < size) {
        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
        ggml_vk_destroy_buffer(ctx->sync_staging);
        ctx->sync_staging = ggml_vk_create_buffer_check(ctx, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}
static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
    GGML_ASSERT(!ggml_is_contiguous(tensor));
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ASSERT(false);
    }
    // Check if src is pinned memory
    vk_buffer buf;
    size_t buf_offset;
    ggml_vk_host_get(ctx, tensor->data, buf, buf_offset);

    const uint64_t ne0 = tensor->ne[0];
    const uint64_t ne1 = tensor->ne[1];
    const uint64_t ne2 = tensor->ne[2];
    const uint64_t ne3 = tensor->ne[3];
    const uint64_t nb0 = tensor->nb[0];
    const uint64_t nb1 = tensor->nb[1];
    const uint64_t nb2 = tensor->nb[2];
    const uint64_t nb3 = tensor->nb[3];
    const ggml_type type = tensor->type;
    const uint64_t ts = ggml_type_size(type);
    const uint64_t bs = ggml_blck_size(type);

    const uint64_t dstnb0 = ts;
    const uint64_t dstnb1 = dstnb0*(ne0/bs);
    const uint64_t dstnb2 = dstnb1*ne1;
    const uint64_t dstnb3 = dstnb2*ne2;

    const uint64_t ne = ggml_nelements(tensor);
    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices;

        for (uint64_t i3 = 0; i3 < ne3; i3++) {
            for (uint64_t i2 = 0; i2 < ne2; i2++) {
                // Find longest contiguous slice
                if (ne1*nb1 == dstnb2) {
                    slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
                } else {
                    for (uint64_t i1 = 0; i1 < ne1; i1++) {
                        if (ne0*nb0/bs == dstnb1) {
                            slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
                        } else {
                            const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                            const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                            for (uint64_t i0 = 0; i0 < ne0; i0++) {
                                // index the source by i0 (not i1), matching the deferred_memcpy path below
                                slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 });
                            }
                        }
                    }
                }
            }
        }

        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    // Staging buffer required
    vk_buffer staging = ctx->staging;
    size_t staging_offset = ctx->staging_offset;
    const size_t copy_size = ts*ne/bs;
    // Guard against a missing staging buffer as well as an undersized one
    if (ctx->staging == nullptr || ctx->staging->size < ctx->staging_offset + copy_size) {
        if (sync_staging) {
            // Create temporary larger buffer
            ggml_vk_ensure_sync_staging_buffer(ctx, copy_size);

            staging = ctx->sync_staging;
            staging_offset = 0;
        } else {
            GGML_ASSERT(false);
        }
    }
    VkBufferCopy buf_copy{ staging_offset, offset, copy_size };

    ggml_vk_sync_buffers(subctx);
    vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy);

    for (uint64_t i3 = 0; i3 < ne3; i3++) {
        for (uint64_t i2 = 0; i2 < ne2; i2++) {
            // Find longest contiguous slice
            if (ne1*nb1 == dstnb2) {
                deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
            } else {
                for (uint64_t i1 = 0; i1 < ne1; i1++) {
                    if (ne0*nb0/bs == dstnb1) {
                        deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
                    } else {
                        const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                        const uint64_t d_off = staging_offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                        for (uint64_t i0 = 0; i0 < ne0; i0++) {
                            deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
                        }
                    }
                }
            }
        }
    }
}
static void ggml_vk_buffer_write_2d_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")");
    // Make sure ctx owns the buffer
    GGML_ASSERT(dst->ctx == ctx);
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ASSERT(false);
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset;
    ggml_vk_host_get(ctx, src, buf, buf_offset);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices(1);
        if (width == spitch) {
            // Only do single write if stride is equal
            slices[0].srcOffset = buf_offset;
            slices[0].dstOffset = offset;
            slices[0].size = width * height;
        } else {
            slices.resize(height);
            for (size_t i = 0; i < height; i++) {
                slices[i].srcOffset = buf_offset + i * spitch;
                slices[i].dstOffset = offset + i * width;
                slices[i].size = width;
            }
        }

        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    // Staging buffer required
    vk_buffer staging = ctx->staging;
    size_t staging_offset = ctx->staging_offset;
    const size_t copy_size = width*height;
    if (ctx->staging == nullptr || ctx->staging->size < ctx->staging_offset + copy_size) {
        if (sync_staging) {
            ggml_vk_ensure_sync_staging_buffer(ctx, copy_size);

            staging = ctx->sync_staging;
            staging_offset = 0;
        } else {
            GGML_ASSERT(false);
        }
    }

    VkBufferCopy buf_copy = {
        staging_offset,
        offset,
        copy_size};

    ggml_vk_sync_buffers(subctx);
    vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy);

    if (width == spitch) {
        deferred_memcpy((uint8_t *)staging->ptr + staging_offset, src, width * height, &subctx->in_memcpys);
    } else {
        for (size_t i = 0; i < height; i++) {
            deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
        }
    }
}
static void ggml_vk_buffer_write_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_async(" << size << ")");
    return ggml_vk_buffer_write_2d_async(ctx, subctx, dst, offset, src, size, size, 1, sync_staging);
}
static void ggml_vk_buffer_write_2d(ggml_backend_vk_context * ctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        for (size_t i = 0; i < height; i++) {
            memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
        }
    } else {
        vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ggml_vk_ctx_begin(ctx, subctx);
        ggml_vk_buffer_write_2d_async(ctx, subctx, dst, offset, src, spitch, width, height, true);
        ggml_vk_ctx_end(subctx);

        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(subctx, ctx->fence);
        VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
        ctx->device->device.resetFences({ ctx->fence });
    }
}
static void ggml_vk_buffer_write(ggml_backend_vk_context * ctx, vk_buffer& dst, size_t offset, const void * src, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_write(" << size << ")");
    ggml_vk_buffer_write_2d(ctx, dst, offset, src, 0, size, 1);
}
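
// Minimal usage sketch for the write helpers (illustrative; `data` and `buf`
// are hypothetical):
//
//     float data[256] = {};
//     ggml_vk_buffer_write(ctx, buf, 0, data, sizeof(data));
//
// For host-visible buffers this degenerates to a memcpy through the mapping;
// for device-local buffers it records a staged copy and blocks on the fence.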
static void ggml_vk_buffer_read_2d_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
    GGML_ASSERT(width > 0);
    GGML_ASSERT(height > 0);
    GGML_ASSERT(src != nullptr);
    // Make sure ctx owns the buffer
    GGML_ASSERT(src->ctx == ctx);

    // Check if dst is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset;
    ggml_vk_host_get(ctx, dst, buf, buf_offset);

    std::vector<vk::BufferCopy> slices(1);
    if (width == spitch && width == dpitch) {
        // Only do single write if stride is equal
        slices[0].srcOffset = offset;
        slices[0].dstOffset = buf_offset;
        slices[0].size = width * height;
    } else {
        slices.resize(height);
        for (size_t i = 0; i < height; i++) {
            slices[i].srcOffset = offset + i * spitch;
            slices[i].dstOffset = buf_offset + i * dpitch;
            slices[i].size = width;
        }
    }

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    // Fall back to staging buffer
    vk_buffer staging = ctx->staging;
    const size_t copy_size = dpitch * height;
    if (ctx->staging == nullptr || ctx->staging->size < ctx->staging_offset + copy_size) {
        if (sync_staging) {
            // Create temporary larger buffer
            ggml_vk_ensure_sync_staging_buffer(ctx, copy_size);

            staging = ctx->sync_staging;
        } else {
            GGML_ASSERT(false);
        }
    }

    ggml_vk_sync_buffers(subctx);
    subctx->s->buffer.copyBuffer(src->buffer, staging->buffer, slices);

    deferred_memcpy(dst, staging->ptr, copy_size, &subctx->out_memcpys);
}
static void ggml_vk_buffer_read_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
    return ggml_vk_buffer_read_2d_async(ctx, subctx, src, offset, dst, size, size, size, 1, sync_staging);
}

static void ggml_vk_buffer_read(ggml_backend_vk_context * ctx, vk_buffer& src, size_t offset, void * dst, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_read(" << offset << ", " << size << ")");
    if (src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        memcpy(dst, (uint8_t *) src->ptr + offset, size);
    } else {
        vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ggml_vk_ctx_begin(ctx, subctx);
        ggml_vk_buffer_read_async(ctx, subctx, src, offset, dst, size, true);
        ggml_vk_ctx_end(subctx);

        ggml_vk_submit(subctx, ctx->fence);
        VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
        ctx->device->device.resetFences({ ctx->fence });

        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
    }
}
static void ggml_vk_buffer_copy_async(vk_context * ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_copy_async(" << size << ")");
    // Make sure both buffers are on same ctx
    GGML_ASSERT(src->ctx == dst->ctx);

    VkBufferCopy bc{ src_offset, dst_offset, size };

    vkCmdCopyBuffer(ctx->s->buffer, src->buffer, dst->buffer, 1, &bc);
}
static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    if (src->ctx == dst->ctx) {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
        // Copy within the device
        ggml_backend_vk_context * ctx = src->ctx;

        vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ggml_vk_ctx_begin(ctx, subctx);
        ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
        ggml_vk_ctx_end(subctx);
        ggml_vk_submit(subctx, ctx->fence);
        VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
        ctx->device->device.resetFences({ ctx->fence });
    } else {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")");
        // Copy device to device
        ggml_backend_vk_context * src_ctx = src->ctx;
        ggml_backend_vk_context * dst_ctx = dst->ctx;

        ggml_vk_ensure_sync_staging_buffer(src_ctx, size);
        ggml_vk_ensure_sync_staging_buffer(dst_ctx, size);

        // Copy to src staging buffer
        ggml_vk_buffer_copy(src_ctx->sync_staging, 0, src, src_offset, size);
        // memcpy to dst staging buffer
        memcpy(dst_ctx->sync_staging->ptr, src_ctx->sync_staging->ptr, size);
        // Copy to dst buffer
        ggml_vk_buffer_copy(dst, dst_offset, dst_ctx->sync_staging, 0, size);
    }
}
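
// Note on the MULTI_DEVICE path above: Vulkan offers no direct copies between
// buffers of two different devices, so the transfer bounces through two
// host-visible staging buffers: src -> src staging (GPU copy), src staging ->
// dst staging (host memcpy), dst staging -> dst (GPU copy). Each hop is
// synchronous, so this path is correct but comparatively slow.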
static void ggml_vk_buffer_memset(ggml_backend_vk_context * ctx, vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");
    // Make sure ctx owns the buffer
    GGML_ASSERT(dst->ctx == ctx);

    vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
    ggml_vk_ctx_begin(ctx, subctx);
    subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
    ggml_vk_ctx_end(subctx);

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_memset waitForFences");
    ctx->device->device.resetFences({ ctx->fence });
}
static void ggml_vk_h2d_tensor_2d(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const ggml_tensor * src, uint64_t i3, uint64_t i2, uint64_t i1) {
    VK_LOG_DEBUG("ggml_vk_h2d_tensor_2d(dst=" << dst << ", offset=" << offset << ", src=" << src << ", i3=" << i3 << ", i2=" << i2 << ", i1=" << i1 << ")");
    const uint64_t ne0 = src->ne[0];
    const uint64_t ne1 = src->ne[1];
    const uint64_t nb0 = src->nb[0];
    const uint64_t nb1 = src->nb[1];
    const uint64_t nb2 = src->nb[2];
    const uint64_t nb3 = src->nb[3];
    const enum ggml_type type = src->type;
    const size_t ts = ggml_type_size(type);
    const size_t bs = ggml_blck_size(type);
    const size_t row_length = ts*ne0/bs;

    const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
    if (nb0 == ts && nb1 == row_length) {
        return ggml_vk_buffer_write_async(ctx, subctx, dst, offset, x, i1*nb1);
    }
    if (nb0 == ts && (i1 == ne1 || !ggml_is_permuted(src))) {
        return ggml_vk_buffer_write_2d_async(ctx, subctx, dst, offset, x, nb1, row_length, i1);
    }

    GGML_ASSERT(i3 == 0);
    GGML_ASSERT(i2 == 0);
    GGML_ASSERT(i1 == (uint64_t) ggml_nrows(src));

    return ggml_vk_buffer_write_nc_async(ctx, subctx, dst, offset, src);
}
static void ggml_vk_d2h_tensor_2d(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& src, size_t offset, const ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_d2h_tensor_2d()");
    const uint64_t ne0 = dst->ne[0];
    const uint64_t ne1 = dst->ne[1];
    const uint64_t ne2 = dst->ne[2];
    const uint64_t ne3 = dst->ne[3];
    const uint64_t nb0 = dst->nb[0];
    const uint64_t nb1 = dst->nb[1];
    // const uint64_t nb2 = dst->nb[2];
    // const uint64_t nb3 = dst->nb[3];
    const enum ggml_type type = dst->type;
    const size_t ts = ggml_type_size(type);
    const size_t bs = ggml_blck_size(type);
    const size_t row_length = ts*ne0/bs;

    if (ggml_is_contiguous(dst)) {
        return ggml_vk_buffer_read_async(ctx, subctx, src, offset, dst->data, ne1*nb1*ne2*ne3);
    }
    if (nb0 == ts) {
        return ggml_vk_buffer_read_2d_async(ctx, subctx, src, offset, dst->data, nb1, nb1, row_length, ne1*ne2*ne3);
    }
    GGML_ASSERT(false);
}
static uint32_t ggml_vk_guess_split_k(int m, int n, int k) {
    VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ")");
    // if (k > 128 && (m < 128 || n < 128) && m > 2 && n > 2) {
    //     return 4;
    // }

    return 1;

    GGML_UNUSED(m); GGML_UNUSED(n); GGML_UNUSED(k);
}
static vk_pipeline ggml_vk_guess_matmul_pipeline_amd(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
    if (m <= 32 || n <= 32) {
        return aligned ? mmp->a_s : mmp->s;
    }
    return aligned ? mmp->a_m : mmp->m;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_apple(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
    return aligned ? mmp->a_m : mmp->m;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_intel(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
    return aligned ? mmp->a_s : mmp->s;

    GGML_UNUSED(ctx);
}
static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ")");
    switch (ctx->device->vendor_id) {
    case VK_VENDOR_ID_AMD:
        return ggml_vk_guess_matmul_pipeline_amd(ctx, mmp, m, n, aligned);
    case VK_VENDOR_ID_APPLE:
        return ggml_vk_guess_matmul_pipeline_apple(ctx, mmp, aligned);
    case VK_VENDOR_ID_INTEL:
        return ggml_vk_guess_matmul_pipeline_intel(ctx, mmp, aligned);
    default:
        break;
    }

    if (m <= 32 || n <= 32) {
        return aligned ? mmp->a_s : mmp->s;
    }
    if (m <= 64 || n <= 64) {
        return aligned ? mmp->a_m : mmp->m;
    }
    return aligned ? mmp->a_l : mmp->l;
}

static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ")");
    return ggml_vk_guess_matmul_pipeline(ctx, mmp, m, n, true)->align;
}
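
// Record a matmul dispatch. With split_k == 1 this is a single dispatch over
// an m x n x batch grid. With split_k > 1 the k dimension is split into
// split_k chunks of CEIL_DIV(k, split_k) columns: pass 1 writes split_k
// partial products per output element into split_k_buffer, and pass 2
// (pipeline_matmul_split_k_reduce) sums them into d. As a sketch, with
// k = 4096 and split_k = 4, each of the 4 slices accumulates over 1024
// k values and the reduce pass adds 4 partials per output element.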
static void ggml_vk_matmul(
        ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3) {
    VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ")");
    ggml_vk_sync_buffers(subctx);
    if (split_k == 1) {
        const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3 };
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, sizeof(vk_mat_mat_push_constants), &pc, { m, n, batch });
        return;
    }

    GGML_ASSERT(batch_stride_d == m * n);

    const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3 };
    // Make sure enough workgroups get assigned for split k to work
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, sizeof(vk_mat_mat_push_constants), &pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch });
    ggml_vk_sync_buffers(subctx);
    const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2.size() * sizeof(uint32_t), pc2.data(), { m * n * batch, 1, 1 });
}
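
// Indirect variant used for mixture-of-experts: the ids subbuffer carries the
// expert indices that route each row of b to one of the n_as expert matrices
// in a, so the grid is { m, nei1, n_as } rather than { m, n, batch }.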
static void ggml_vk_matmul_id(
        ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11) {
    VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " <<
        "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " <<
        "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " <<
        "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")");
    ggml_vk_sync_buffers(subctx);
    const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d,
                                              nei0, nei1, nbi1, ne11 };
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, sizeof(vk_mat_mat_id_push_constants), &pc, { m, nei1, n_as });
}
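
// A tensor is "dim01-contiguous" when dims 0 and 1 are densely packed and
// dim 3 immediately follows dim 2 (nb[3] == nb[2]*ne[2]); a stride gap
// between dims 1 and 2 (e.g. padded planes) is the only irregularity
// allowed. For instance an f32 tensor with ne = {8, 4, 2, 1} and
// nb = {4, 32, 256, 512} qualifies (rows packed, planes padded to 256
// bytes), while its transpose (nb[0] > nb[1]) does not.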
static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, ggml_type from, ggml_type to) {
    if (from == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
        return ctx->device->pipeline_cpy_f32_f32;
    }
    if (from == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
        return ctx->device->pipeline_cpy_f32_f16;
    }
    if (from == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
        return ctx->device->pipeline_cpy_f16_f16;
    }

    std::cerr << "Missing CPY op for types: " << ggml_type_name(from) << " " << ggml_type_name(to) << std::endl;
    GGML_ASSERT(false);
}
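
// Compact a non-contiguous tensor into a packed layout on the GPU. The push
// constants carry the source strides (converted to elements) alongside a
// synthesized contiguous destination layout with strides
// {1, ne0, ne0*ne1, ne0*ne1*ne2}; the copy shader is then dispatched over
// one grid slot per element.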
static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out) {
    VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
    std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
    const int tensor_type_size = ggml_type_size(tensor->type);

    const uint32_t ne = ggml_nelements(tensor);

    const vk_op_unary_push_constants pc = {
        (uint32_t)ne,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1, (uint32_t)tensor->ne[0], (uint32_t)(tensor->ne[0] * tensor->ne[1]), (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
        0,
        0.0f, 0.0f,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, sizeof(vk_op_unary_push_constants), &pc, { ne, 1, 1 });
}
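
// Main matrix-matrix multiplication path. Roughly, the flow below is:
//   1. look up UMA host pointers where the device supports it,
//   2. decide whether either operand needs dequantization or a
//      copy-to-contiguous pass into the preallocated staging buffers,
//   3. pick a pipeline variant (aligned when k is a multiple of the
//      pipeline's alignment and m, n are large enough),
//   4. run the dequant/copy dispatches, then the matmul itself with
//      broadcast factors r2 = ne12/ne02 and r3 = ne13/ne03.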
static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;

    if (mmp == nullptr) {
        // Fall back to dequant + f16 mulmat
        mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, GGML_TYPE_F16, y_f32_kernel ? GGML_TYPE_F32 : GGML_TYPE_F16);
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const int x_ne = ne01 * ne00;
    const int y_ne = ne11 * ne10;
    const int d_ne = ne11 * ne01;

    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11));
    const bool aligned = ne10 == kpad && ne01 > 8 && ne11 > 8;

    const uint32_t split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);

    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx, pipeline, 1);
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_1, 1);
    }
    if (split_k > 1) {
        ggml_pipeline_allocate_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, 1);
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { { d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, { d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
    }
    if (y_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
        { d_D, d_buf_offset, d_sz * ne12 * ne13 }, { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k },
        ne01, ne11, ne10,
        ne10, ne10, ne01, stride_batch_x, stride_batch_y, ne20*ne21,
        split_k, ne12*ne13, ne02, ne12, r2, r3
    ); // NOLINT
}
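
// Matrix-vector path (ne11 == 1): rather than staging a dequantized copy of
// src0, this uses the dequant_mul_mat_vec shaders, which appear to
// dequantize src0 on the fly inside the kernel, and keeps src1 in f32 when
// an f16 x f32 kernel variant exists (f16_f32_kernel).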
static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne11 * ne01;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    // Allocate descriptor sets
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_1, y_non_contig ? 1 : ne12 * ne13);
    }
    ggml_pipeline_allocate_descriptor_sets(ctx, dmmv, ne12 * ne13);

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    const vk_mat_vec_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        stride_batch_x, stride_batch_y, (uint32_t)(ne20*ne21),
        (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv, { { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 }, { d_D, d_buf_offset, d_sz * ne22 * ne23} }, sizeof(vk_mat_vec_push_constants), &pc, { (uint32_t)ne01, (uint32_t)(ne12 * ne13), 1});
}
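
// Specialized f16 x f32 matrix-vector kernel for permuted tensors; the
// "p021" in the name presumably refers to the (0, 2, 1, 3) permutation it
// expects. Because the operands may start at offsets that are not multiples
// of minStorageBufferOffsetAlignment, the binding offset is rounded down to
// the alignment and the remainder is handed to the shader as an element
// offset in the push constants.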
static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_p021_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
    GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
    GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t x_ne = ne00 * ne01 * ne02;
    const uint64_t y_ne = ne10 * ne11 * ne12;
    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
    const uint64_t qx_buf_offset = extra_src0->offset + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr); // was d_Qx, which had already been checked above
    }

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, 1);

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;

    // compute
    const std::array<uint32_t, 6> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { { d_Qx, qx_buf_offset, qx_sz }, { d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
}
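
// Generic non-contiguous f16 x f32 matrix-vector kernel: instead of
// requiring a particular permutation it passes explicit row and channel
// strides (nb01 and nb02, in f16 elements) to the shader, using the same
// offset-alignment split as the p021 path above.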
static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];

    // const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;

    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);

    const uint64_t qx_sz = ggml_nbytes(src0);
    const uint64_t qy_sz = ggml_nbytes(src1);
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
    const uint64_t qx_buf_offset = extra_src0->offset + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr); // was d_Qx, which had already been checked above
    }

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;

    // compute
    const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, (uint32_t)(ne12 / ne02), (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, { { d_Qx, qx_buf_offset, qx_sz }, { d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
}
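
// Dispatch to the best mul_mat implementation: the two specialized f16
// vector kernels when their layout constraints hold, the generic
// matrix-vector path when the result has a single column, and the full
// matmul otherwise.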
static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")");
    if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && dst->ne[1] == 1) {
        ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, src0, src1, dst);
    } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1) {
        ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst);
    } else if (dst->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst);
    } else {
        ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst);
    }
}
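
// Mixture-of-experts matmul: src0 holds n_as (= ne02) expert matrices along
// dim 2 and the I32 ids tensor selects which expert each row of src1 is
// routed to. The current shaders cap the id count at nei0 * nei1 <= 2048 and
// the expert count at n_as <= 8; otherwise the structure mirrors
// ggml_vk_mul_mat_q_f16 above, ending in a ggml_vk_matmul_id dispatch.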
static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];
    GGML_ASSERT(nei0 * nei1 <= 2048);

    const uint32_t nbi1 = ids->nb[1];
    const uint32_t nbi2 = ids->nb[2];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t n_as = ne02;
    GGML_ASSERT(n_as <= 8);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
    ggml_tensor_extra_gpu * extra_ids = (ggml_tensor_extra_gpu *) ids->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;

    if (mmp == nullptr) {
        GGML_ASSERT(false);
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, nei1));
    const bool aligned = ne10 == kpad && ne01 > 8 && nei1 > 8;

    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, nei1, aligned);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = extra_ids->buffer_gpu.lock();
        ids_buf_offset = extra_ids->offset + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    // Allocate descriptor sets
    ggml_pipeline_allocate_descriptor_sets(ctx, pipeline, 1);
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_1, 1);
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { { d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, { d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
    }
    if (y_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul_id(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
        { d_D, d_buf_offset, d_sz * ne22 * ne23 }, { d_ids, ids_buf_offset, ids_sz },
        ne01, ne21, ne10, ne10, ne10, ne01,
        stride_batch_x, stride_batch_y, ne20*ne21,
        n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11
    ); // NOLINT
}
static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];

    const uint64_t nbi2 = ids->nb[2];

    GGML_ASSERT(nei1 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
    ggml_tensor_extra_gpu * extra_ids = (ggml_tensor_extra_gpu *) ids->extra;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_buffer d_D = extra->buffer_gpu.lock();
    const uint64_t d_buf_offset = extra->offset + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = extra_src0->buffer_gpu.lock();
        qx_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = extra_src1->buffer_gpu.lock();
        qy_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = extra_ids->buffer_gpu.lock();
        ids_buf_offset = extra_ids->offset + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec_id(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    // Allocate descriptor sets
    if (qx_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_0, 1);
    }
    if (qy_needs_dequant) {
        ggml_pipeline_allocate_descriptor_sets(ctx, to_fp16_vk_1, y_non_contig ? 1 : ne12 * ne13);
    }
    ggml_pipeline_allocate_descriptor_sets(ctx, dmmv, ne12 * ne13);

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    const vk_mat_vec_id_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        (uint32_t)x_ne, stride_batch_y, (uint32_t)(ne20*ne21),
        (uint32_t)nei0, (uint32_t)ne11,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        { { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 }, { d_D, d_buf_offset, d_sz * ne22 * ne23}, { d_ids, ids_buf_offset, ids_sz } },
        sizeof(vk_mat_vec_id_push_constants), &pc, { (uint32_t)ne01, (uint32_t)nei0, 1 });
}
static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id(" << src0 << ", " << src1 << ", " << src2 << ", " << dst << ")");
    if (src2->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_id_q_f16(ctx, subctx, src0, src1, src2, dst);
    } else {
        ggml_vk_mul_mat_id_q_f16(ctx, subctx, src0, src1, src2, dst);
    }
}
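
// GGML_OP_REPEAT is implemented without a shader: the output is assembled
// from vk::BufferCopy regions, one source row per copy, tiled nr0..nr3
// times across the destination.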
static void ggml_vk_op_repeat(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    // guaranteed to be an integer due to the check in ggml_can_repeat
    const uint64_t ne0 = dst->ne[0];
    const uint64_t ne1 = dst->ne[1];
    const uint64_t ne2 = dst->ne[2];
    const uint64_t ne3 = dst->ne[3];

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t nb0 = dst->nb[0];
    const uint64_t nb1 = dst->nb[1];
    const uint64_t nb2 = dst->nb[2];
    const uint64_t nb3 = dst->nb[3];

    const uint64_t nb00 = src0->nb[0];
    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];
    const uint64_t nb03 = src0->nb[3];

    const uint64_t nr0 = ne0/ne00;
    const uint64_t nr1 = ne1/ne01;
    const uint64_t nr2 = ne2/ne02;
    const uint64_t nr3 = ne3/ne03;

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;

    const vk_buffer src_buf = extra_src0->buffer_gpu.lock();
    const uint64_t src_offset = extra_src0->offset + src0->view_offs;
    vk_buffer dst_buf = extra->buffer_gpu.lock();
    const uint64_t dst_offset = extra->offset + dst->view_offs;

    std::vector<vk::BufferCopy> copies;

    for (uint64_t i3 = 0; i3 < nr3; i3++) {
        for (uint64_t k3 = 0; k3 < ne03; k3++) {
            for (uint64_t i2 = 0; i2 < nr2; i2++) {
                for (uint64_t k2 = 0; k2 < ne02; k2++) {
                    for (uint64_t i1 = 0; i1 < nr1; i1++) {
                        for (uint64_t k1 = 0; k1 < ne01; k1++) {
                            for (uint64_t i0 = 0; i0 < nr0; i0++) {
                                // vk::BufferCopy is { srcOffset, dstOffset, size }: read one row
                                // of the source at (k3, k2, k1) using the source strides and
                                // write it to the repeated position in the destination using the
                                // destination strides. (The original had the two index
                                // expressions crossed, which would read past the end of src0
                                // whenever any repeat factor exceeds 1.)
                                copies.push_back({
                                    src_offset + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01,
                                    dst_offset + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0,
                                    ne00*nb0,
                                });
                            }
                        }
                    }
                }
            }
        }
    }

    ggml_vk_sync_buffers(subctx);
    subctx->s->buffer.copyBuffer(src_buf->buffer, dst_buf->buffer, copies);

    GGML_UNUSED(ctx);
    GGML_UNUSED(src1);
}
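
// Map an (op, operand types) combination to a compute pipeline. Returning
// nullptr signals an unsupported combination; most element-wise ops are
// implemented for f32 only at this point.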
static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op) {
    switch (op) {
    case GGML_OP_ADD:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_add_f32;
        }
        return nullptr;
    case GGML_OP_GET_ROWS:
        GGML_ASSERT(src1->type == GGML_TYPE_I32);
        if (dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_get_rows[src0->type];
        }
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_get_rows_f32[src0->type];
        }
        return nullptr;
    case GGML_OP_MUL:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_mul_f32;
        }
        return nullptr;
    case GGML_OP_DIV:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_div_f32;
        }
        return nullptr;
    case GGML_OP_SCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_scale_f32;
        }
        return nullptr;
    case GGML_OP_SQR:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqr_f32;
        }
        return nullptr;
    case GGML_OP_CLAMP:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_clamp_f32;
        }
        return nullptr;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        return ggml_vk_get_cpy_pipeline(ctx, src0->type, dst->type);
    case GGML_OP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_norm_f32;
        }
        return nullptr;
    case GGML_OP_RMS_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rms_norm_f32;
        }
        return nullptr;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(dst)) {
        case GGML_UNARY_OP_SILU:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_silu_f32;
            }
            break;
        case GGML_UNARY_OP_GELU:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_gelu_f32;
            }
            break;
        case GGML_UNARY_OP_RELU:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_relu_f32;
            }
            break;
        default:
            break;
        }
        return nullptr;
    case GGML_OP_DIAG_MASK_INF:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_diag_mask_inf_f32;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX:
        GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);

        if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_f32;
        }
        // src1 may be null here, so only select the f16-mask variant when it is present
        if (src0->type == GGML_TYPE_F32 && src1 != nullptr && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_f32_f16;
        }
        return nullptr;
    case GGML_OP_ROPE:
        {
            const int mode = ((const int32_t *) dst->op_params)[2];
            const bool is_neox = mode & 2;

            if (is_neox) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_neox_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f16;
                }
            } else {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_norm_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f16;
                }
            }
            return nullptr;
        }
    case GGML_OP_ARGSORT:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_argsort_f32;
        }
        return nullptr;
    case GGML_OP_SUM_ROWS:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sum_rows_f32;
        }
        return nullptr;
    default:
        return nullptr;
    }

    GGML_UNUSED(src2);
}
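// Ops implemented with plain buffer commands instead of a compute shader
// (currently only GGML_OP_REPEAT, which is expanded into a batch of
// vkCmdCopyBuffer regions by ggml_vk_op_repeat above).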
static ggml_vk_func_t ggml_vk_op_get_func(ggml_op op) {
    switch(op) {
    case GGML_OP_REPEAT:
        return ggml_vk_op_repeat;
    default:
        return nullptr;
    }
}
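// Ops whose shaders take explicit strides and can therefore operate on
// non-contiguous tensors directly; everything else gets one dispatch per
// (ne02, ne03) slice in ggml_vk_op_f32().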
static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
    switch (op) {
    case GGML_OP_CPY:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
        return true;
    default:
        return false;
    }
}
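// Generic dispatch path for element-wise and row-wise ops: looks up the
// pipeline, resolves the device buffers behind src0/src1/src2/dst (including
// UMA host pointers), computes the dispatch grid and pushes the op's push
// constants. Note that the multi-statement debug print at the top is a single
// VK_LOG_DEBUG(...) invocation: everything up to the final ")"); is the macro
// argument, so the whole block compiles away when debug logging is disabled.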
template<typename PC>
static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, const PC&& pc) {
    VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    if (src1 != nullptr) {
        std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    }
    if (src2 != nullptr) {
        std::cerr << "), (" << src2 << ", name=" << src2->name << ", type=" << src2->type << ", ne0=" << src2->ne[0] << ", ne1=" << src2->ne[1] << ", ne2=" << src2->ne[2] << ", ne3=" << src2->ne[3] << ", nb0=" << src2->nb[0] << ", nb1=" << src2->nb[1] << ", nb2=" << src2->nb[2] << ", nb3=" << src2->nb[3];
    }
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "), " << ggml_op_name(op) << ")");
    GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type))));  // NOLINT
    GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0));  // NOLINT
    GGML_ASSERT(dst->extra != nullptr);
    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];
    const uint64_t ne0 = ne00 * ne01;

    const bool use_src1 = src1 != nullptr;
    const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const uint64_t ne1 = ne10 * ne11;
    // const uint64_t nb10 = use_src1 ? src1->nb[0] : 0;

    const bool use_src2 = src2 != nullptr;
    const uint64_t ne20 = use_src2 ? src2->ne[0] : 0;
    const uint64_t ne21 = use_src2 ? src2->ne[1] : 0;
    const uint64_t ne22 = use_src2 ? src2->ne[2] : 0;
    const uint64_t ne23 = use_src2 ? src2->ne[3] : 0;
    const uint64_t ne2 = ne20 * ne21;

    const uint64_t ned0 = dst->ne[0];
    const uint64_t ned1 = dst->ne[1];
    const uint64_t ned2 = dst->ne[2];
    const uint64_t ned3 = dst->ne[3];
    const uint64_t ned = ned0 * ned1;

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);
    ggml_vk_func_t op_func;

    if (pipeline == nullptr) {
        op_func = ggml_vk_op_get_func(op);
        if (op_func == nullptr) {
            std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
            if (src1 != nullptr) {
                std::cerr << " and " << ggml_type_name(src1->type);
            }
            std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
            GGML_ASSERT(false);
        }

        op_func(ctx, subctx, src0, src1, dst);
        return;
    }

    const bool op_supports_incontiguous = ggml_vk_op_supports_incontiguous(op);

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * extra_src1 = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
    ggml_tensor_extra_gpu * extra_src2 = use_src2 ? (ggml_tensor_extra_gpu *) src2->extra : nullptr;

    vk_buffer d_X = nullptr;
    size_t x_buf_offset = 0;
    vk_buffer d_Y = nullptr;
    size_t y_buf_offset = 0;
    vk_buffer d_Z = nullptr;
    size_t z_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool src2_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx, src0->data, d_X, x_buf_offset);
        src0_uma = d_X != nullptr;
        if (use_src1) {
            ggml_vk_host_get(ctx, src1->data, d_Y, y_buf_offset);
            src1_uma = d_Y != nullptr;
        }
        if (use_src2) {
            ggml_vk_host_get(ctx, src2->data, d_Z, z_buf_offset);
            src2_uma = d_Z != nullptr;
        }
    }

    uint64_t x_sz = ggml_type_size(src0->type)/ggml_blck_size(src0->type) * ne0;
    uint64_t y_sz = use_src1 ? ggml_type_size(src1->type) * ne1 : 0;
    uint64_t z_sz = use_src2 ? ggml_type_size(src2->type) * ne2 : 0;
    uint64_t d_sz = ggml_type_size(dst->type) * ned;

    vk_buffer d_D = extra->buffer_gpu.lock();

    // Workaround for tiny tensor inputs on ROPE
    if (use_src1 && y_sz > d_D->size) {
        y_sz = VK_WHOLE_SIZE;
    }

    GGML_ASSERT(d_D != nullptr);
    uint64_t d_buf_offset = ((extra->offset + dst->view_offs) / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    GGML_ASSERT(d_buf_offset == extra->offset || op == GGML_OP_CPY);  // NOLINT
    if (!src0_uma) {
        d_X = extra_src0->buffer_gpu.lock();
        x_buf_offset = extra_src0->offset + src0->view_offs;
        GGML_ASSERT(d_X != nullptr);
    }
    if (use_src1 && !src1_uma) {
        d_Y = extra_src1->buffer_gpu.lock();
        y_buf_offset = extra_src1->offset + src1->view_offs;
        GGML_ASSERT(d_Y != nullptr);
    }
    if (use_src2 && !src2_uma) {
        d_Z = extra_src2->buffer_gpu.lock();
        z_buf_offset = extra_src2->offset + src2->view_offs;
        GGML_ASSERT(d_Z != nullptr);
    }

    if (op_supports_incontiguous) {
        x_sz = ggml_nbytes(src0);
        y_sz = use_src1 ? ggml_nbytes(src1) : 0;
        z_sz = use_src2 ? ggml_nbytes(src2) : 0;
        d_sz = ggml_nbytes(dst);

        if (x_buf_offset + x_sz >= d_X->size) {
            x_sz = VK_WHOLE_SIZE;
        }
        if (use_src1 && y_buf_offset + y_sz >= d_Y->size) {
            y_sz = VK_WHOLE_SIZE;
        }
        if (use_src2 && z_buf_offset + z_sz >= d_Z->size) {
            z_sz = VK_WHOLE_SIZE;
        }
        if (d_buf_offset + d_sz >= d_D->size) {
            d_sz = VK_WHOLE_SIZE;
        }
    }

    std::array<uint32_t, 3> elements;

    // Single call if dimension 2 is contiguous
    if (op_supports_incontiguous || (ggml_is_contiguous(src0) && (src1 == nullptr || ggml_is_contiguous(src1)))) {
        ggml_pipeline_allocate_descriptor_sets(ctx, pipeline, 1);

        switch (dst->op) {
        case GGML_OP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SUM_ROWS:
            elements = { (uint32_t)ggml_nrows(src0), 1, 1 };
            break;
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ROPE:
            elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
            break;
        case GGML_OP_GET_ROWS:
            elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
            break;
        case GGML_OP_ARGSORT:
            elements = { (uint32_t)ne00, (uint32_t)ggml_nrows(src0), 1 };
            break;
        default:
            elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
            break;
        }

        if (!op_supports_incontiguous) {
            if (x_sz != VK_WHOLE_SIZE) {
                x_sz *= ne02 * ne03;
            }
            if (use_src1 && y_sz != VK_WHOLE_SIZE) {
                y_sz *= ne12 * ne13;
            }
            if (use_src2 && z_sz != VK_WHOLE_SIZE) {
                z_sz *= ne22 * ne23;
            }
            if (d_sz != VK_WHOLE_SIZE) {
                d_sz *= ned2 * ned3;
            }
        }

        if (op == GGML_OP_SOFT_MAX) {
            // Empty src1 is possible in soft_max, but the shader needs a buffer
            vk_subbuffer subbuf_y;
            if (use_src1) {
                subbuf_y = { d_Y, y_buf_offset, y_sz };
            } else {
                subbuf_y = { d_X, 0, d_X->size };
            }

            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, subbuf_y, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (op == GGML_OP_ROPE) {
            // Empty src2 is possible in rope, but the shader needs a buffer
            vk_subbuffer subbuf_z;
            if (use_src2) {
                subbuf_z = { d_Z, z_buf_offset, z_sz };
            } else {
                subbuf_z = { d_X, 0, d_X->size };
            }

            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, subbuf_z, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (use_src2) {
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, { d_Z, z_buf_offset, z_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else if (use_src1) {
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        } else {
            ggml_vk_sync_buffers(subctx);
            ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
        }
    } else {
        GGML_ASSERT(op != GGML_OP_SOFT_MAX);
        GGML_ASSERT(op != GGML_OP_ARGSORT);
        GGML_ASSERT(!use_src2);

        ggml_pipeline_allocate_descriptor_sets(ctx, pipeline, ne02 * ne03);

        switch (dst->op) {
        case GGML_OP_NORM:
        case GGML_OP_RMS_NORM:
            elements = { (uint32_t)ne01, 1, 1 };
            break;
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ROPE:
            elements = { (uint32_t)ne01, (uint32_t)ne00, 1 };
            break;
        case GGML_OP_GET_ROWS:
            elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
            break;
        default:
            elements = { (uint32_t)ne0, 1, 1 };
            break;
        }

        for (uint64_t i03 = 0; i03 < ne03; i03++) {
            for (uint64_t i02 = 0; i02 < ne02; i02++) {
                const uint32_t it_idx0 = (i03 * ne02 + i02);
                const uint32_t it_idx1 = use_src1 ? ((i03 % ne13) * ne12 + (i02 % ne12)) : 0;
                const uint32_t x_offset = x_sz * it_idx0;
                const uint32_t y_offset = y_sz * it_idx1;
                const uint32_t d_offset = d_sz * it_idx0;

                if (use_src1) {
                    ggml_vk_sync_buffers(subctx);
                    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset + x_offset, x_sz }, { d_Y, y_buf_offset + y_offset, y_sz }, { d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
                } else {
                    ggml_vk_sync_buffers(subctx);
                    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset + x_offset, x_sz }, { d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
                }
            }
        }
    }
}
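// Thin per-op wrappers: each packs the tensor shapes and strides into the
// push-constant struct expected by the corresponding shader and forwards to
// ggml_vk_op_f32().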
static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_REPEAT, { (uint32_t)ggml_nelements(src0), (uint32_t)ggml_nelements(src1), 0.0f, 0.0f });
}

static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_GET_ROWS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}
static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_ADD, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}

static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_MUL, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}

static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_DIV, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}

static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SCALE, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], 0.0f
    });
}

static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    });
}

static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CLAMP, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], op_params[1],
    });
}
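// CPY may write to a destination whose byte offset is not aligned to
// minStorageBufferOffsetAlignment; ggml_vk_op_f32() rounds the buffer binding
// down to the alignment and the remainder is passed to the shader as an
// element offset (d_offset).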
static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);
    const uint32_t d_offset = ((extra->offset + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CPY, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        d_offset,
        0.0f, 0.0f,
    });
}

static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
}

static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
}

static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    int32_t * op_params = (int32_t *)dst->op_params;
    ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] });
}
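// Soft max with an optional mask (src1). m0/m1 are the ALiBi slope bases
// derived from max_bias and the head count, analogous to the CPU backend's
// soft_max implementation.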
static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    float * op_params = (float *)dst->op_params;

    float scale = op_params[0];
    float max_bias = op_params[1];

    const uint32_t ncols   = (uint32_t)src0->ne[0];
    const uint32_t nrows_x = (uint32_t)ggml_nrows(src0);
    const uint32_t nrows_y = (uint32_t)src0->ne[1];

    const uint32_t n_head_kv   = nrows_x / nrows_y;
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_SOFT_MAX, {
        ncols,
        src1 != nullptr ? nrows_y : (uint32_t)0,
        scale, max_bias,
        m0, m1,
        n_head_log2,
    });
}
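// RoPE: unpacks the rotation parameters from dst->op_params and precomputes
// the YaRN correction dims and theta scale for the shader. src2, if present,
// carries the per-channel frequency factors (hence the has-src2 flag in the
// push constants).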
static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
    const int n_dims        = ((int32_t *) dst->op_params)[1];
    // const int mode       = ((int32_t *) dst->op_params)[2];
    // const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_ctx_orig    = ((int32_t *) dst->op_params)[4];
    const float freq_base   = ((float *) dst->op_params)[5];
    const float freq_scale  = ((float *) dst->op_params)[6];
    const float ext_factor  = ((float *) dst->op_params)[7];
    const float attn_factor = ((float *) dst->op_params)[8];
    const float beta_fast   = ((float *) dst->op_params)[9];
    const float beta_slow   = ((float *) dst->op_params)[10];

    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, src2, dst, GGML_OP_ROPE, {
        (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1],
        freq_base, ext_factor, attn_factor, {corr_dims[0], corr_dims[1]}, theta_scale,
        src2 != nullptr,
    });
}
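// Argsort: the shader sorts with a power-of-two network, so the row length is
// padded up to the next power of two; the assert reflects the shader's
// 1024-element workgroup limit.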
static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    int32_t * op_params = (int32_t *)dst->op_params;

    uint32_t ncols = src0->ne[0];

    uint32_t ncols_pad = 1;
    while (ncols_pad < ncols) {
        ncols_pad *= 2;
    }

    GGML_ASSERT(ncols_pad <= 1024);

    ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
        ncols,
        ncols_pad,
        op_params[0],
    });
}

static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f });
}
#ifdef GGML_VULKAN_RUN_TESTS
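// Self-test harness, compiled only when GGML_VULKAN_RUN_TESTS is defined:
// exercises matmul, host<->device transfers, dequantization and
// non-contiguous copies against the CPU backend and prints timings plus the
// average error.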
static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
    if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
                float val;
                if (type == GGML_TYPE_F32) {
                    val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
                } else if (type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
                } else {
                    GGML_ASSERT(false);
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
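// Runs num_it iterations of the selected matmul pipeline variant (aligned or
// unaligned S/M/L shader, picked from the X/Y element types), then validates
// the result against a single-threaded CPU ggml graph computing the same
// mul_mat.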
template <typename X_TYPE, typename Y_TYPE>
static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
    VK_LOG_DEBUG("ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_s;
            shname = "F32_ALIGNED_S";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_s;
            shname = "F32_F16_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32->a_s;
            shname = "F16_F32_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16->a_s;
            shname = "F16_ALIGNED_S";
        } else {
            GGML_ASSERT(false);
        }
    } else if (shader_size == 1) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_m;
            shname = "F32_ALIGNED_M";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_m;
            shname = "F32_F16_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32->a_m;
            shname = "F16_F32_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16->a_m;
            shname = "F16_ALIGNED_M";
        } else {
            GGML_ASSERT(false);
        }
    } else if (shader_size == 2) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_l;
            shname = "F32_ALIGNED_L";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_l;
            shname = "F32_F16_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32->a_l;
            shname = "F16_F32_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16->a_l;
            shname = "F16_ALIGNED_L";
        } else {
            GGML_ASSERT(false);
        }
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->s;
                shname = "F32_S";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->s;
                shname = "F32_F16_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->s;
                shname = "F16_F32_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->s;
                shname = "F16_S";
            }
        } else if (shader_size == 1) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->m;
                shname = "F32_M";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->m;
                shname = "F32_F16_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->m;
                shname = "F16_F32_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->m;
                shname = "F16_M";
            }
        } else if (shader_size == 2) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->l;
                shname = "F32_L";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->l;
                shname = "F32_F16_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->l;
                shname = "F16_F32_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->l;
                shname = "F16_L";
            }
        }
    }

    ggml_pipeline_allocate_descriptor_sets(ctx, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_allocate_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    }

    vk_buffer d_X = ggml_vk_create_buffer_check(ctx, sizeof(X_TYPE) * x_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_Y = ggml_vk_create_buffer_check(ctx, sizeof(Y_TYPE) * y_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_D = ggml_vk_create_buffer_check(ctx, sizeof(float) * d_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);

    X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
    Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
    float* d = (float *) malloc(sizeof(float) * d_ne);

    for (size_t i = 0; i < x_ne; i++) {
        if (std::is_same<float, X_TYPE>()) {
            x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
            x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
        } else {
            GGML_ASSERT(false);
        }
    }
    for (size_t i = 0; i < y_ne; i++) {
        if (std::is_same<float, Y_TYPE>()) {
            // y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            y[i] = (i % k == i / k) ? 1.0f : 0.0f;
        } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
            // y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
        } else {
            GGML_ASSERT(false);
        }
    }

    ggml_vk_buffer_write(ctx, d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
    ggml_vk_buffer_write(ctx, d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);

    vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_ctx_begin(ctx, subctx);
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1
        );
        ggml_vk_ctx_end(subctx);
    }

    auto begin = std::chrono::high_resolution_clock::now();
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();
    double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    // copy dst to host
    ggml_vk_buffer_read(ctx, d_D, 0, d, sizeof(float) * d_ne);

    float * d_chk = (float *) malloc(sizeof(float) * d_ne);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_type src0_type;
    ggml_type src1_type;

    if (std::is_same<float, X_TYPE>()) {
        src0_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
        src0_type = GGML_TYPE_F16;
    } else {
        GGML_ASSERT(false);
    }
    if (std::is_same<float, Y_TYPE>()) {
        src1_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
        src1_type = GGML_TYPE_F16;
    } else {
        GGML_ASSERT(false);
    }

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = x;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;

        if (err > 0.05f && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }
    // The error sum runs over all batches, so normalize by the full element count
    avg_err /= m * n * batch;
    std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n + 15, first_err_b);
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx, ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    free(d_chk);

    ggml_vk_queue_cleanup(ctx, ctx->device->transfer_queue);
    ggml_vk_queue_cleanup(ctx, ctx->device->compute_queue);

    ggml_vk_destroy_buffer(d_X);
    ggml_vk_destroy_buffer(d_Y);
    ggml_vk_destroy_buffer(d_D);

    ggml_pipeline_cleanup(p);
    ggml_pipeline_cleanup(ctx->device->pipeline_matmul_split_k_reduce);

    free(x);
    free(y);
    free(d);
}
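// Prints a 10x10 window of an F32/F16 tensor around (i0, i1), honoring the
// tensor's strides; used by the tests below to show mismatching regions.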
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else {
                    GGML_ASSERT(false);
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
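// Checks host-to-device upload of a non-contiguous (permuted) tensor: the
// source ne[1]/ne[2] axes are swapped via their strides, and the upload must
// linearize the data into the contiguous device layout.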
static void ggml_vk_test_h2d_nc(ggml_backend_vk_context * ctx, size_t ne0, size_t ne1, size_t ne2, size_t ne3) {
    const size_t ne = ne0 * ne1 * ne2 * ne3;

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_tensor * tensor = ggml_new_tensor_4d(ggml_ctx, GGML_TYPE_F32, ne0, ne2, ne1, ne3);  // NOLINT
    ggml_tensor * result_tensor = ggml_new_tensor_4d(ggml_ctx, GGML_TYPE_F32, ne0, ne1, ne2, ne3);

    float * data = (float *) ggml_vk_host_malloc(ctx, ggml_nbytes(tensor));
    tensor->data = data;

    float * result_data = (float *) malloc(ggml_nbytes(tensor));
    result_tensor->data = result_data;

    // Permute
    {
        size_t tmp = tensor->nb[2];
        tensor->nb[2] = tensor->nb[1];
        tensor->nb[1] = tmp;

        tensor->ne[2] = ne2;
        tensor->ne[1] = ne1;
    }

    for (size_t i = 0; i < ne; i++) {
        data[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
    }

    vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    ggml_vk_ctx_begin(ctx, subctx);

    vk_buffer buffer = ggml_vk_create_buffer_check(ctx, ggml_nbytes(tensor), vk::MemoryPropertyFlagBits::eDeviceLocal);

    ggml_vk_h2d_tensor_2d(ctx, subctx, buffer, 0, tensor, 0, 0, ggml_nrows(tensor));

    ggml_vk_ctx_end(subctx);
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_h2d_nc waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    ggml_vk_buffer_read(ctx, buffer, 0, result_data, ggml_nbytes(tensor));

    double avg_err = 0.0;
    int first_err_i0 = -1;
    int first_err_i1 = -1;
    int first_err_i2 = -1;
    int first_err_i3 = -1;

    for (size_t i3 = 0; i3 < ne3; i3++) {
        for (size_t i2 = 0; i2 < ne2; i2++) {
            for (size_t i1 = 0; i1 < ne1; i1++) {
                for (size_t i0 = 0; i0 < ne0; i0++) {
                    float correct = *(float *) ((char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                    float result  = *(float *) ((char *) result_data + i3*ne2*ne1*ne0*sizeof(float) + i2*ne1*ne0*sizeof(float) + i1*ne0*sizeof(float) + i0*sizeof(float));
                    double err = std::fabs(result - correct);

                    avg_err += err;

                    if (err > 0.05f && first_err_i0 == -1) {
                        first_err_i0 = i0;
                        first_err_i1 = i1;
                        first_err_i2 = i2;
                        first_err_i3 = i3;
                    }
                }
            }
        }
    }

    avg_err /= ne;

    std::cerr << "TEST nc copy ne0=" << ne0 << " ne1=" << ne1 << " ne2=" << ne2 << " ne3=" << ne3 << " avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "i0 = " << first_err_i0 << " i1 = " << first_err_i1 << " i2 = " << first_err_i2 << " i3 = " << first_err_i3 << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_tensor_area(result_tensor, first_err_i0, first_err_i1, first_err_i2, first_err_i3);
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_tensor_area(tensor, first_err_i0, first_err_i1, first_err_i2, first_err_i3);
    }

    ggml_free(ggml_ctx);

    ggml_vk_destroy_buffer(buffer);

    ggml_vk_host_free(ctx, data);
    free(result_data);
}
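// Measures round-trip transfer bandwidth (optionally with pinned host memory)
// and verifies the data survives the trip unchanged. The staging memcpys that
// the async write/read queue up are executed inline here.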
static void ggml_vk_test_transfer(ggml_backend_vk_context * ctx, size_t ne, bool pinned) {
    VK_LOG_DEBUG("ggml_vk_test_transfer(" << ne << ")");
    // Check transfers are correct
    vk_buffer buffer = ggml_vk_create_buffer_check(ctx, sizeof(float) * ne, vk::MemoryPropertyFlagBits::eDeviceLocal);

    float * x;
    float * y;
    if (pinned) {
        x = (float *) ggml_vk_host_malloc(ctx, sizeof(float) * ne);
        y = (float *) ggml_vk_host_malloc(ctx, sizeof(float) * ne);
    } else {
        x = (float *) malloc(sizeof(float) * ne);
        y = (float *) malloc(sizeof(float) * ne);
    }

    for (size_t i = 0; i < ne; i++) {
        x[i] = rand() / (float)RAND_MAX;
    }

    vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    ggml_vk_ctx_begin(ctx, subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_buffer_write_async(ctx, subctx, buffer, 0, x, sizeof(float) * ne);

    for (auto& cpy : subctx->in_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }
    subctx->in_memcpys.clear();

    ggml_vk_ctx_end(subctx);
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_transfer waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();

    double ms_to_gpu = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    ggml_vk_ctx_begin(ctx, subctx);

    begin = std::chrono::high_resolution_clock::now();

    ggml_vk_buffer_read_async(ctx, subctx, buffer, 0, y, sizeof(float) * ne);

    ggml_vk_ctx_end(subctx);
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_transfer waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    for (auto& cpy : subctx->out_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }
    subctx->out_memcpys.clear();

    end = std::chrono::high_resolution_clock::now();

    double ms_from_gpu = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    double avg_err = 0.0;
    for (size_t i = 0; i < ne; i++) {
        avg_err += std::fabs(x[i] - y[i]);
    }

    double kb = ne * sizeof(float) / 1024.0;

    std::cerr << "TEST TRANSFER " << kb << " KB to_gpu " << ms_to_gpu << "ms (" << kb / ms_to_gpu * 1000.0 / 1024.0 << " MB/s) from_gpu " << ms_from_gpu << "ms (" << kb / ms_from_gpu * 1000.0 / 1024.0 << " MB/s) avg_err=" << avg_err / ne << std::endl;

    ggml_vk_destroy_buffer(buffer);

    if (pinned) {
        ggml_vk_host_free(ctx, x);
        ggml_vk_host_free(ctx, y);
    } else {
        free(x);
        free(y);
    }
}
static void ggml_vk_quantize_data(const float * from, void * to, size_t ne, ggml_type quant) {
    ggml_quantize_chunk(quant, from, to, 0, 1, ne, nullptr);
}
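// Quantizes random data on the host, dequantizes it to F16 with the device's
// dequant pipeline and compares against the original floats.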
static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant(" << ne << ")");
    const size_t x_sz = sizeof(float) * ne;
    const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
    const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
    float * x = (float *) malloc(x_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer x_buf = ggml_vk_create_buffer_check(ctx, x_sz_f16, vk::MemoryPropertyFlagBits::eDeviceLocal);
    ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);

    for (size_t i = 0; i < ne; i++) {
        x[i] = rand() / (float)RAND_MAX;
    }

    vk_pipeline p = ctx->device->pipeline_dequant[quant];

    ggml_vk_quantize_data(x, qx, ne, quant);

    ggml_pipeline_allocate_descriptor_sets(ctx, p, 1);

    ggml_vk_buffer_write(ctx, qx_buf, 0, qx, qx_sz);

    vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    ggml_vk_ctx_begin(ctx, subctx);
    const std::vector<uint32_t> pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne };
    ggml_vk_dispatch_pipeline(ctx, subctx, p, { { qx_buf, 0, qx_sz }, { x_buf, 0, x_sz_f16 } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)ne, 1, 1});
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();

    double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(ctx, x_buf, 0, x_chk, x_sz_f16);

    int first_err = -1;

    double avg_err = 0.0;
    for (size_t i = 0; i < ne; i++) {
        double error = std::fabs(x[i] - ggml_fp16_to_fp32(x_chk[i]));
        avg_err += error;

        if (first_err < 0 && error > 0.05) {
            first_err = i;
        }
    }

    avg_err /= ne;

    std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "first_error = " << first_err << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << ggml_fp16_to_fp32(x_chk[i]) << ", ";
        }
        std::cerr << std::endl << "Expected result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << x[i] << ", ";
        }
        std::cerr << std::endl;
    }

    ggml_vk_destroy_buffer(x_buf);
    ggml_vk_destroy_buffer(qx_buf);

    free(x);
    free(qx);
    free(x_chk);
}
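// Same idea as ggml_vk_test_matmul, but with a quantized src0: picks the
// dequant mul_mat_mat pipeline for the given quant type and validates against
// the CPU backend.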
static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, size_t split_k, size_t shader_size, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << ggml_type_name(quant) << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_s;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S";
    } else if (shader_size == 1) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_m;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M";
    } else if (shader_size == 2) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_l;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L";
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->s;
            shname = std::string(ggml_type_name(quant)) + "_S";
        } else if (shader_size == 1) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->m;
            shname = std::string(ggml_type_name(quant)) + "_M";
        } else if (shader_size == 2) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->l;
            shname = std::string(ggml_type_name(quant)) + "_L";
        } else {
            GGML_ASSERT(0);
        }
    }

    const size_t x_sz = sizeof(float) * x_ne;
    const size_t y_sz = sizeof(float) * y_ne;
    const size_t qx_sz = x_ne * ggml_type_size(quant)/ggml_blck_size(quant);
    const size_t d_sz = sizeof(float) * d_ne;
    float * x = (float *) malloc(x_sz);
    float * y = (float *) malloc(y_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer y_buf = ggml_vk_create_buffer_check(ctx, y_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_buf = ggml_vk_create_buffer_check(ctx, d_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    float * d = (float *) malloc(d_sz);
    float * d_chk = (float *) malloc(d_sz);

    for (size_t i = 0; i < x_ne; i++) {
        x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
    }

    ggml_vk_quantize_data(x, qx, x_ne, quant);

    for (size_t i = 0; i < y_ne; i++) {
        // y[i] = rand() / (float)RAND_MAX;
        y[i] = (i % k == i / k) ? 1.0f : 0.0f;
    }

    ggml_pipeline_allocate_descriptor_sets(ctx, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_allocate_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    }

    ggml_vk_buffer_write(ctx, qx_buf, 0, qx, qx_sz);
    ggml_vk_buffer_write(ctx, y_buf, 0, y, y_sz);

    vk_context * subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_ctx_begin(ctx, subctx);
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(qx_buf), ggml_vk_subbuffer(y_buf), ggml_vk_subbuffer(d_buf), ggml_vk_subbuffer(ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1
        );
        ggml_vk_ctx_end(subctx);
    }

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
  4333. VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
  4334. ctx->device->device.resetFences({ ctx->fence });
  4335. auto end = std::chrono::high_resolution_clock::now();
  4336. double time_ms = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
  4337. ggml_vk_buffer_read(ctx, d_buf, 0, d, d_sz);
  4338. ggml_init_params iparams = {
  4339. /*.mem_size =*/ 1024*1024*1024,
  4340. /*.mem_buffer =*/ NULL,
  4341. /*.no_alloc =*/ true,
  4342. };
  4343. ggml_context * ggml_ctx = ggml_init(iparams);
  4344. ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, quant, k, m, batch);
  4345. ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, GGML_TYPE_F32, k, n, batch);
  4346. ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);
  4347. src0_ggml->data = qx;
  4348. src1_ggml->data = y;
  4349. tensor_ggml->data = d_chk;
  4350. ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
  4351. ggml_build_forward_expand(cgraph, tensor_ggml);
  4352. ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);
  4353. ggml_free(ggml_ctx);
  4354. double avg_err = 0.0;
  4355. int first_err_n = -1;
  4356. int first_err_m = -1;
  4357. int first_err_b = -1;
  4358. for (size_t i = 0; i < m*n*batch; i++) {
  4359. double err = std::fabs(d[i] - d_chk[i]);
  4360. avg_err += err;
  4361. if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
  4362. first_err_b = i / (m * n);
  4363. first_err_n = (i % (m * n)) / m;
  4364. first_err_m = (i % (m * n)) % m;
  4365. }
  4366. }
  4367. avg_err /= m * n;
  4368. std::cerr << "TEST MMQ " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time_ms / num_it << "ms avg_err=" << avg_err << std::endl;
  4369. if (avg_err > 0.01 || std::isnan(avg_err)) {
  4370. std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
  4371. std::cerr << "Actual result: " << std::endl << std::endl;
  4372. ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  4373. std::cerr << std::endl;
  4374. std::cerr << "Expected result: " << std::endl << std::endl;
  4375. ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  4376. if (split_k > 1) {
  4377. float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
  4378. ggml_vk_buffer_read(ctx, ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);
  4379. std::cerr << "d_buf0: " << std::endl << std::endl;
  4380. ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  4381. std::cerr << "d_buf1: " << std::endl << std::endl;
  4382. ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  4383. std::cerr << "d_buf2: " << std::endl << std::endl;
  4384. ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  4385. std::cerr << "d_buf3: " << std::endl << std::endl;
  4386. ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  4387. free(split_k_buf);
  4388. }
  4389. }
  4390. ggml_vk_destroy_buffer(qx_buf);
  4391. ggml_vk_destroy_buffer(y_buf);
  4392. ggml_vk_destroy_buffer(d_buf);
  4393. free(x);
  4394. free(qx);
  4395. free(y);
  4396. free(d);
  4397. free(d_chk);
  4398. }
  4399. #endif
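
// Attach a fresh ggml_tensor_extra_gpu to a tensor. The extra carries the
// Vulkan-side bookkeeping this backend keeps next to the plain ggml_tensor
// (the device buffer reference, the offset into it, and the index of the
// compute context the tensor was recorded into).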
static ggml_tensor_extra_gpu * ggml_vk_tensor_create_extra(ggml_tensor * tensor) {
    VK_LOG_DEBUG("ggml_vk_tensor_create_extra(" << tensor << " (" << tensor->name << ", " << ggml_op_name(tensor->op) << "))");
    ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu;
    extra->reset();
    tensor->extra = extra;
    return extra;
}
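
// First pass over the compute graph: inspect each node and record the
// worst-case scratch sizes it will need (dequantized src0, converted src1,
// split-k partial results and host staging space), so that
// ggml_vk_preallocate_buffers() can allocate each scratch buffer once instead
// of reallocating per node.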
static void ggml_vk_preallocate_buffers_graph(ggml_backend_vk_context * ctx, ggml_tensor * node) {
    VK_LOG_DEBUG("ggml_vk_preallocate_buffers_graph(" << node << ")");
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;

    if (extra == nullptr) {
        return;
    }

    ggml_tensor * src0 = node->src[0];
    ggml_tensor * src1 = node->src[1];

    const bool use_src0 = src0 != nullptr;
    const int64_t ne00 = use_src0 ? src0->ne[0] : 0;
    const int64_t ne01 = use_src0 ? src0->ne[1] : 0;
    const int64_t ne02 = use_src0 ? src0->ne[2] : 0;
    const int64_t ne03 = use_src0 ? src0->ne[3] : 0;
    const bool use_src1 = src1 != nullptr && node->op != GGML_OP_CPY && node->op != GGML_OP_CONT && node->op != GGML_OP_DUP;
    const int64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const int64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const int64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const int64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const int64_t ne20 = node->ne[0];
    const int64_t ne21 = node->ne[1];
    const int64_t ne22 = node->ne[2];
    const int64_t ne23 = node->ne[3];

    const ggml_type src0_type = (use_src0 && src0->type == GGML_TYPE_F32) ? src0->type : GGML_TYPE_F16;
    const ggml_type src1_type = (use_src1 && src1->type == GGML_TYPE_F32) ? src1->type : GGML_TYPE_F16;

    const bool x_non_contig = use_src0 && !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = use_src1 && !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = use_src1 && src1->type == GGML_TYPE_F32 && !y_non_contig;

    bool mmp = (use_src0 && use_src1 && src1_type == GGML_TYPE_F32) ? ggml_vk_get_mul_mat_mat_pipeline(ctx, src0_type, y_non_contig ? GGML_TYPE_F16 : src1->type) != nullptr : false;

    const bool qx_needs_dequant = use_src0 && (!mmp || x_non_contig);
    const bool qy_needs_dequant = use_src1 && ((src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig);

    int split_k;
    if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
        split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
    } else {
        split_k = 1;
    }
    const uint32_t x_ne = ne00 * ne01;
    const uint32_t y_ne = ne10 * ne11;
    const uint32_t d_ne = ne20 * ne21;

    // NOTE: sizeof(src0_type)/sizeof(src1_type) is the size of the ggml_type
    // enum itself (typically 4 bytes), not the element size, so this
    // over-reserves for f16 data; it is a safe upper bound.
    const uint64_t x_sz = (use_src0 && qx_needs_dequant) ? ggml_vk_align_size(sizeof(src0_type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ne02 * ne03 : 0;
    const uint64_t y_sz = (use_src1 && qy_needs_dequant) ? ggml_vk_align_size(sizeof(src1_type) * y_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ne12 * ne13 : 0;
    uint64_t d_sz = ggml_vk_align_size(ggml_type_size(node->type) * d_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ne22 * ne23;
    const uint64_t split_k_size = split_k > 1 ? d_sz * 4 : 0;

    if (extra->buffer_gpu.expired()) {
        // Workaround for CPU backend BLAS matmul calls
        extra->buffer_gpu = ggml_vk_create_buffer_temp(ctx, d_sz);
    }

    switch (node->op) {
    case GGML_OP_REPEAT:
    case GGML_OP_GET_ROWS:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_ADD:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_RELU:
            break;
        default:
            return;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
        if (ctx->prealloc_size_x < x_sz) {
            ctx->prealloc_size_x = x_sz;
        }
        if (ctx->prealloc_size_y < y_sz) {
            ctx->prealloc_size_y = y_sz;
        }
        if (ctx->prealloc_size_split_k < split_k_size) {
            ctx->prealloc_size_split_k = split_k_size;
        }
        if (ctx->staging_size < x_sz + y_sz) {
            ctx->staging_size = x_sz + y_sz;
        }
        break;
    default:
        return;
    }
}
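
// Second pass: (re)allocate the scratch buffers sized by
// ggml_vk_preallocate_buffers_graph(). Buffers only ever grow; an existing
// buffer that is already large enough is kept. With GGML_VULKAN_RUN_TESTS
// defined this instead runs the transfer/dequant/matmul self-tests and then
// aborts via GGML_ASSERT(false).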
static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
#if defined(GGML_VULKAN_RUN_TESTS)
    ctx->staging = ggml_vk_create_buffer_check(ctx, 100ul * 1024ul * 1024ul,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    ggml_vk_test_transfer(ctx, 8192 * 1000, false);
    ggml_vk_test_transfer(ctx, 8192 * 1000, true);

    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_F32);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q6_K);

    ggml_vk_test_matmul<ggml_fp16_t, ggml_fp16_t>(ctx, 512, 512, 100, 32, 100, 1, 2);

    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 0);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 1);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 2);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 0);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 1);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 2);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_1);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_1);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q8_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q2_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q3_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q6_K);

    std::cerr << std::endl;

    const std::vector<size_t> vals {
        8, 8, 8,
        100, 46, 576,
        623, 111, 128,
        100, 46, 558,
        512, 1, 256,
        128, 110, 622,
        511, 511, 127,
        511, 511, 7,
        511, 511, 17,
        49, 49, 128,
        128, 49, 49,
        4096, 49, 4096,
        11008, 49, 4096,
        4096, 49, 11008,
        32000, 49, 4096,
        512, 512, 128,
        128, 512, 512,
        4096, 512, 4096,
        11008, 512, 4096,
        4096, 512, 11008,
        32000, 512, 4096,
    };
    const size_t num_it = 1;
    for (size_t i = 0; i < vals.size(); i += 3) {
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
        std::cerr << std::endl;
    }

    GGML_ASSERT(false);
#endif

    if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(x_size: " << ctx->prealloc_size_x << ")");
        // Resize buffer
        if (ctx->prealloc_x != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_x);
        }
        ctx->prealloc_x = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_x);
    }
    if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(y_size: " << ctx->prealloc_size_y << ")");
        // Resize buffer
        if (ctx->prealloc_y != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_y);
        }
        ctx->prealloc_y = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_y);
    }
    if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(split_k_size: " << ctx->prealloc_size_split_k << ")");
        // Resize buffer
        if (ctx->prealloc_split_k != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_split_k);
        }
        ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_split_k);
    }
    if (ctx->staging == nullptr || (ctx->staging_size > 0 && ctx->staging->size < ctx->staging_size)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(staging_size: " << ctx->staging_size << ")");
        // Resize buffer
        if (ctx->staging != nullptr) {
            ggml_vk_destroy_buffer(ctx->staging);
        }
        ctx->staging = ggml_vk_create_buffer_check(ctx, ctx->staging_size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}
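
// Record one graph node into the current compute context. No-op tensor
// reshuffles (reshape/view/permute/transpose) return early so they do not
// create a compute context or claim the exit tensor; real ops are encoded
// into ctx->compute_ctx, which is created lazily and closed once the last
// node of the graph has been recorded.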
static void ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, bool last_node) {
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;

    if (ggml_is_empty(node) || extra == nullptr) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")");
    ctx->semaphore_idx = 0;
    ctx->staging_offset = 0;

    const ggml_tensor * src0 = node->src[0];
    const ggml_tensor * src1 = node->src[1];
    const ggml_tensor * src2 = node->src[2];

    switch (node->op) {
    // Return on empty ops to avoid generating a compute_ctx and setting exit_tensor
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
        return;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_RELU:
            break;
        default:
            return;
        }
        break;
    case GGML_OP_REPEAT:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
        break;
    default:
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl;
        GGML_ASSERT(false);
        return;
    }

    if (ctx->compute_ctx == nullptr) {
        ctx->compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
        ggml_vk_ctx_begin(ctx, ctx->compute_ctx);
    }

    switch (node->op) {
    case GGML_OP_REPEAT:
        ggml_vk_repeat(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_GET_ROWS:
        ggml_vk_get_rows(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ADD:
        ggml_vk_add(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_MUL:
        ggml_vk_mul(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_DIV:
        ggml_vk_div(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_SCALE:
        ggml_vk_scale(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_SQR:
        ggml_vk_sqr(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_CLAMP:
        ggml_vk_clamp(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        ggml_vk_cpy(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_NORM:
        ggml_vk_norm(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_RMS_NORM:
        ggml_vk_rms_norm(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_RELU:
            ggml_vk_unary(ctx, ctx->compute_ctx, src0, node);
            break;
        default:
            return;
        }
        break;
    case GGML_OP_DIAG_MASK_INF:
        ggml_vk_diag_mask_inf(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_SOFT_MAX:
        ggml_vk_soft_max(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_ROPE:
        ggml_vk_rope(ctx, ctx->compute_ctx, src0, src1, src2, node);
        break;
    case GGML_OP_ARGSORT:
        ggml_vk_argsort(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_SUM_ROWS:
        ggml_vk_sum_rows(ctx, ctx->compute_ctx, src0, node);
        break;
    case GGML_OP_MUL_MAT:
        ggml_vk_mul_mat(ctx, ctx->compute_ctx, src0, src1, node);
        break;
    case GGML_OP_MUL_MAT_ID:
        ggml_vk_mul_mat_id(ctx, ctx->compute_ctx, src0, src1, src2, node);
        break;
    default:
        return;
    }

    extra->ctx_idx = ctx->compute_ctx->idx;

#ifdef GGML_VULKAN_CHECK_RESULTS
    // Force context reset on each node so that each tensor ends up in its own context
    // and can be run and compared to its CPU equivalent separately
    last_node = true;
#endif

    if (last_node) {
        ggml_vk_ctx_end(ctx->compute_ctx);
        ctx->compute_ctx->exit_tensor = node;
        ctx->compute_ctx = nullptr;
    }
}
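
// Execute the work recorded for a tensor: flush any pending host-to-device
// staging copies, submit the tensor's context if it has not been submitted
// yet, and, once the context's exit tensor is reached, wait on the fence and
// run the device-to-host staging copies.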
static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
    ggml_tensor_extra_gpu * extra = nullptr;

    switch (tensor->op) {
    case GGML_OP_ADD:
    case GGML_OP_GET_ROWS:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
        extra = (ggml_tensor_extra_gpu *) tensor->extra;
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(tensor)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_RELU:
            extra = (ggml_tensor_extra_gpu *) tensor->extra;
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
        extra = (ggml_tensor_extra_gpu *) tensor->extra;
        break;
    default:
        return false;
    }

    if (extra == nullptr) {
        return false;
    }

    if (params->ith != 0) {
        return true;
    }
    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return true;
    }

    VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");

#ifdef GGML_VULKAN_CHECK_RESULTS
    ggml_vk_check_results_0(ctx, params, tensor);
#endif

    vk_context& subctx = ctx->gc.contexts[extra->ctx_idx];

    // Only run if ctx hasn't been submitted yet
    if (!subctx.seqs.empty()) {
        // Do staging buffer copies
        for (auto& cpy : subctx.in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(&subctx, ctx->fence);
    }

    if (tensor == subctx.exit_tensor) {
        VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_compute_forward waitForFences");
        ctx->device->device.resetFences({ ctx->fence });

        // Do staging buffer copies
        for (auto& cpy : subctx.out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        subctx.in_memcpys.clear();
        subctx.out_memcpys.clear();
    }

    return true;
}
// Clean up after graph processing is done
static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_graph_cleanup()");
    for (auto& buffer : ctx->gc.temp_buffers) {
        ggml_vk_pool_free(ctx, buffer);
    }
    ctx->gc.temp_buffers.clear();

    for (auto& pipeline : ctx->device->pipelines) {
        if (pipeline.expired()) {
            continue;
        }

        vk_pipeline pl = pipeline.lock();
        ggml_pipeline_cleanup(pl);
    }

    ggml_vk_queue_cleanup(ctx, ctx->device->compute_queue);
    ggml_vk_queue_cleanup(ctx, ctx->device->transfer_queue);

    for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s });
    }
    ctx->gc.semaphores.clear();

    for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
    }
    ctx->gc.tl_semaphores.clear();
    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.resetEvent(event);
    }

    ctx->staging_offset = 0;

    ctx->compute_ctx = nullptr;
    ctx->transfer_ctx = nullptr;
    ctx->gc.contexts.clear();
}

// Clean up on backend free
static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->idx << ")");
    ggml_vk_graph_cleanup(ctx);

    ggml_vk_destroy_buffer(ctx->prealloc_x);
    ggml_vk_destroy_buffer(ctx->prealloc_y);
    ggml_vk_destroy_buffer(ctx->prealloc_split_k);
    ggml_vk_destroy_buffer(ctx->staging);
    ggml_vk_destroy_buffer(ctx->sync_staging);

    for (auto& buffer : ctx->buffer_pool) {
        ggml_vk_destroy_buffer(buffer);
    }

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;
    ctx->staging_size = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.destroyEvent(event);
    }
    ctx->gc.events.clear();

    ctx->device->device.destroyFence(ctx->fence);
}
GGML_CALL static int ggml_vk_get_device_count() {
    ggml_vk_instance_init();

    return vk_instance.device_indices.size();
}

GGML_CALL static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    vk::PhysicalDeviceProperties props;
    devices[device].getProperties(&props);

    snprintf(description, description_size, "%s", props.deviceName.data());
}

// backend interface

#define UNUSED GGML_UNUSED

// device backend

static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000;  // NOLINT

struct ggml_backend_vk_buffer_context {
    ggml_backend_vk_context * ctx;
    vk_buffer dev_buffer;
    ggml_tensor_extra_gpu * temp_tensor_extras = nullptr;
    size_t temp_tensor_extra_index = 0;
    std::string name;

    ggml_backend_vk_buffer_context(ggml_backend_vk_context * ctx, vk_buffer&& dev_buffer, std::string& name) :
        ctx(ctx),
        dev_buffer(dev_buffer),
        name(name) {
    }

    ~ggml_backend_vk_buffer_context() {
        ggml_vk_destroy_buffer(dev_buffer);
        if (temp_tensor_extras != nullptr) {
            delete[] temp_tensor_extras;
        }
    }

    ggml_tensor_extra_gpu * ggml_vk_alloc_temp_tensor_extra() {
        if (temp_tensor_extras == nullptr) {
            temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_VK_MAX_NODES];
        }

        size_t alloc_index = temp_tensor_extra_index;
        temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_VK_MAX_NODES;
        ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index];
        extra->reset();

        return extra;
    }
};

GGML_CALL static const char * ggml_backend_vk_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    return ctx->name.c_str();
}

GGML_CALL static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_vk_buffer_get_name;
}

GGML_CALL static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_free_buffer()");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    ggml_vk_destroy_buffer(ctx->dev_buffer);
    delete ctx;
}

GGML_CALL static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
    return vk_ptr_base;

    UNUSED(buffer);
}
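
// Tensor "data" pointers inside a Vulkan buffer are not host addresses: the
// buffer reports vk_ptr_base as its base, so tensor->data effectively encodes
// an offset into the device buffer. init_tensor recovers that offset and
// stores it in the tensor's extra; views simply share the extra of their
// view source.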
GGML_CALL static void ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    if (tensor->view_src != nullptr) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
        GGML_ASSERT(tensor->view_src->extra != nullptr);
        tensor->extra = tensor->view_src->extra;
    } else {
        ggml_tensor_extra_gpu * extra = ctx->ggml_vk_alloc_temp_tensor_extra();
        extra->buffer_gpu = ctx->dev_buffer;
        extra->offset = (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
        tensor->extra = extra;
    }
}

GGML_CALL static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_write(ctx->ctx, buf, extra->offset + tensor->view_offs + offset, data, size);
}

GGML_CALL static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_read(ctx->ctx, buf, extra->offset + tensor->view_offs + offset, data, size);
}

GGML_CALL static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
    if (ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
        ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;

        vk_buffer src_buf = src_extra->buffer_gpu.lock();
        vk_buffer dst_buf = dst_extra->buffer_gpu.lock();

        ggml_vk_buffer_copy(dst_buf, dst_extra->offset + dst->view_offs, src_buf, src_extra->offset + src->view_offs, ggml_nbytes(src));

        return true;
    }
    return false;

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    ggml_vk_buffer_memset(ctx->ctx, ctx->dev_buffer, 0, value, buffer->size);
}

static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
    /* .get_name    = */ ggml_backend_vk_buffer_get_name,
    /* .free_buffer = */ ggml_backend_vk_buffer_free_buffer,
    /* .get_base    = */ ggml_backend_vk_buffer_get_base,
    /* .init_tensor = */ ggml_backend_vk_buffer_init_tensor,
    /* .set_tensor  = */ ggml_backend_vk_buffer_set_tensor,
    /* .get_tensor  = */ ggml_backend_vk_buffer_get_tensor,
    /* .cpy_tensor  = */ ggml_backend_vk_buffer_cpy_tensor,
    /* .clear       = */ ggml_backend_vk_buffer_clear,
    /* .reset       = */ NULL,
};

// vk buffer type
struct ggml_backend_vk_buffer_type_context {
    std::string name;
    ggml_backend_vk_context * ctx;
};

GGML_CALL static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return ctx->name.c_str();
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")");
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;

    vk_buffer dev_buffer = nullptr;
    try {
        dev_buffer = ggml_vk_create_buffer_device(ctx->ctx, size);
    } catch (const vk::SystemError& e) {
        return nullptr;
    }

    ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->ctx, std::move(dev_buffer), ctx->name);

    return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
}

GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->ctx->device->properties.limits.minStorageBufferOffsetAlignment;
}

GGML_CALL static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->ctx->device->max_memory_allocation_size;
}

GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(buft);
}

static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_vk_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_vk_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_vk_buffer_type_get_alignment,
    /* .get_max_size   = */ ggml_backend_vk_buffer_type_get_max_size,
    /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};

GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
    ggml_vk_instance_init();

    VK_LOG_DEBUG("ggml_backend_vk_buffer_type(" << dev_num << ")");

    GGML_ASSERT(dev_num < vk_instance.device_indices.size());

    ggml_backend_vk_init(dev_num);

    return &vk_instance.buffer_types[dev_num];
}

// host buffer type
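
// Host buffers are pinned (page-locked) host memory, which generally allows
// faster host/device transfers than pageable memory; if pinned allocation
// fails, allocation falls back to an ordinary CPU buffer below.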
GGML_CALL static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_VK_NAME "_Host";

    UNUSED(buft);
}

GGML_CALL static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
    return GGML_VK_NAME "_Host";

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()");
    ggml_vk_host_free(&vk_instance.contexts[0], buffer->context);
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")");

    size += 32;  // Behave like the CPU buffer type
    void * ptr = nullptr;
    try {
        ptr = ggml_vk_host_malloc(&vk_instance.contexts[0], size);
    } catch (vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Failed to allocate pinned memory." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.get_name = ggml_backend_vk_host_buffer_name;
    buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;

    return buffer;
}

GGML_CALL static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return vk_instance.contexts[0].device->properties.limits.minMemoryMapAlignment;

    UNUSED(buft);
}

GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_vk_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_vk_host_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL,  // defaults to SIZE_MAX
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .context = */ nullptr,
    };

    if (!vk_instance.contexts[0].initialized) {
        // Fall back to CPU
        return ggml_backend_cpu_buffer_type();
    }

    return &ggml_backend_vk_buffer_type_host;
}

// backend

GGML_CALL static const char * ggml_backend_vk_name(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return ctx->name.c_str();
}

GGML_CALL static void ggml_backend_vk_free(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    VK_LOG_DEBUG("ggml_backend_vk_free(" << ctx->name << ")");

    size_t idx = ctx->idx;

    ggml_vk_cleanup(ctx);

    ctx->device.reset();
    ctx->initialized = false;

    vk_instance.initialized[idx] = false;
    vk_instance.backends[idx] = nullptr;
    memset(&vk_instance.buffer_types[idx], 0, sizeof(ggml_backend_buffer_type));
    delete backend;
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    GGML_ASSERT(ctx->initialized);

    return ggml_backend_vk_buffer_type(ctx->idx);
}

GGML_CALL static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_set_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_buffer_type(ctx->idx) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    if (ctx->transfer_ctx == nullptr) {
        // Initialize new transfer context
        ctx->transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ggml_vk_ctx_begin(ctx, ctx->transfer_ctx);
    }

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_write_async(ctx, ctx->transfer_ctx, buf, extra->offset + tensor->view_offs + offset, data, size);
}

GGML_CALL static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_get_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_buffer_type(ctx->idx) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

    if (ctx->transfer_ctx == nullptr) {
        // Initialize new transfer context
        ctx->transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ggml_vk_ctx_begin(ctx, ctx->transfer_ctx);
    }

    vk_buffer buf = extra->buffer_gpu.lock();

    ggml_vk_buffer_read_async(ctx, ctx->transfer_ctx, buf, extra->offset + tensor->view_offs + offset, data, size);
}

GGML_CALL static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if ((dst->buffer->buft == ggml_backend_vk_buffer_type(ctx->idx) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
        ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;

        if (ctx->transfer_ctx == nullptr) {
            // Initialize new transfer context
            ctx->transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
            ggml_vk_ctx_begin(ctx, ctx->transfer_ctx);
        }

        vk_buffer src_buf = src_extra->buffer_gpu.lock();
        vk_buffer dst_buf = dst_extra->buffer_gpu.lock();

        ggml_vk_buffer_copy_async(ctx->transfer_ctx, dst_buf, dst_extra->offset + dst->view_offs, src_buf, src_extra->offset + src->view_offs, ggml_nbytes(src));
        return true;
    }

    return false;
}

GGML_CALL static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
    VK_LOG_DEBUG("ggml_backend_vk_synchronize()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if (ctx->transfer_ctx == nullptr) {
        return;
    }

    ggml_vk_ctx_end(ctx->transfer_ctx);

    for (auto& cpy : ctx->transfer_ctx->in_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ggml_vk_submit(ctx->transfer_ctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_backend_vk_synchronize waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    for (auto& cpy : ctx->transfer_ctx->out_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ctx->transfer_ctx = nullptr;
}

static bool ggml_vk_is_empty(ggml_tensor * node) {
    return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
}
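
// Backend graph-compute entry point: size and allocate the scratch buffers,
// record every node into Vulkan command buffers, then walk the nodes again to
// submit and synchronize them in order.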
GGML_CALL static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_vk_preallocate_buffers_graph(ctx, cgraph->nodes[i]);
    }
    ggml_vk_preallocate_buffers(ctx);

    int last_node = cgraph->n_nodes - 1;

    // If the last op in the cgraph isn't backend GPU, the command buffer doesn't get closed properly
    while (last_node > 0 && ggml_vk_is_empty(cgraph->nodes[last_node])) {
        last_node -= 1;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_vk_build_graph(ctx, cgraph->nodes[i], i == last_node);
    }

    ggml_compute_params params = {};
    params.type = GGML_TASK_TYPE_COMPUTE;
    params.ith = 0;
    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_tensor * node = cgraph->nodes[i];

        if (ggml_vk_is_empty(node)) {
            continue;
        }

        bool ok = ggml_vk_compute_forward(ctx, &params, node);
        if (!ok) {
            fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
        }
#ifdef GGML_VULKAN_CHECK_RESULTS
        else {
            ggml_vk_check_results_1(ctx, &params, node);
        }
#endif
        GGML_ASSERT(ok);
    }

    ggml_vk_graph_cleanup(ctx);

    return GGML_STATUS_SUCCESS;

    UNUSED(backend);
}
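
// Static capability check used by the backend scheduler to decide which ops
// may be placed on this backend. It has to stay in sync with the ops actually
// handled in ggml_vk_build_graph().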
GGML_CALL static bool ggml_backend_vk_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
    // ggml_backend_vk_context * ctx = (ggml_backend_vk_context *) backend->context;

    switch (op->op) {
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(op)) {
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_RELU:
            return ggml_is_contiguous(op->src[0]);
        default:
            return false;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
        {
            switch (op->src[0]->type) {
            case GGML_TYPE_F32:
            case GGML_TYPE_F16:
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
            case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q3_K:
            case GGML_TYPE_Q4_K:
            case GGML_TYPE_Q5_K:
            case GGML_TYPE_Q6_K:
                break;
            default:
                return false;
            }
            struct ggml_tensor * a;
            struct ggml_tensor * b;
            if (op->op == GGML_OP_MUL_MAT) {
                a = op->src[0];
                b = op->src[1];
            } else {
                a = op->src[2];
                b = op->src[1];
            }
            if (a->ne[3] != b->ne[3]) {
                return false;
            }
            return true;
        } break;
    case GGML_OP_GET_ROWS:
        {
            switch (op->src[0]->type) {
            case GGML_TYPE_F32:
            case GGML_TYPE_F16:
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
                return true;
            default:
                return false;
            }
        } break;
    case GGML_OP_CPY:
    case GGML_OP_DUP:
        {
            ggml_type src0_type = op->src[0]->type;
            ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
            if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
                return true;
            }
            if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
                return true;
            }
            if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                return true;
            }
            return false;
        } break;
    // case GGML_OP_REPEAT:
    //     {
    //         ggml_type src0_type = op->src[0]->type;
    //         return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16;
    //     } break;
    case GGML_OP_ROPE:
        return ggml_is_contiguous(op->src[0]);
    case GGML_OP_NONE:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NORM:
    case GGML_OP_ADD:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_RMS_NORM:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_CLAMP:
    case GGML_OP_CONT:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
        return true;
    default:
        return false;
    }

    UNUSED(backend);
}
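
// Heuristic for automatic offload: only batches of at least 32 rows are
// considered worth moving to the GPU, since smaller workloads tend to be
// dominated by transfer and dispatch overhead rather than compute.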
GGML_CALL static bool ggml_backend_vk_offload_op(ggml_backend_t backend, const ggml_tensor * op) {
    const int min_batch_size = 32;

    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
           (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);

    UNUSED(backend);
}

GGML_CALL static bool ggml_backend_vk_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    if (buft->iface.get_name != ggml_backend_vk_buffer_type_name) {
        return false;
    }

    ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return buft_ctx->ctx->idx == ctx->idx;
}

// TODO: enable async and synchronize
static ggml_backend_i ggml_backend_vk_interface = {
    /* .get_name                = */ ggml_backend_vk_name,
    /* .free                    = */ ggml_backend_vk_free,
    /* .get_default_buffer_type = */ ggml_backend_vk_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,  // ggml_backend_vk_set_tensor_async,
    /* .get_tensor_async        = */ NULL,  // ggml_backend_vk_get_tensor_async,
    /* .cpy_tensor_async        = */ NULL,  // ggml_backend_vk_cpy_tensor_async,
    /* .synchronize             = */ NULL,  // ggml_backend_vk_synchronize,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_vk_graph_compute,
    /* .supports_op             = */ ggml_backend_vk_supports_op,
    /* .supports_buft           = */ ggml_backend_vk_supports_buft,
    /* .offload_op              = */ ggml_backend_vk_offload_op,
    /* .event_new               = */ NULL,
    /* .event_free              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
    /* .event_synchronize       = */ NULL,
};

static ggml_guid_t ggml_backend_vk_guid() {
    static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
    return &guid;
}
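
// Create (or fetch the cached) backend for the given device index, e.g.:
//
//   ggml_backend_t backend = ggml_backend_vk_init(0);
//   ggml_backend_buffer_type_t buft = ggml_backend_vk_buffer_type(0);
//
// Both calls are idempotent per device: repeated initialization returns the
// already-created objects.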
  5431. GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t dev_num) {
  5432. if (vk_instance.initialized[dev_num]) {
  5433. return vk_instance.backends[dev_num];
  5434. }
  5435. VK_LOG_DEBUG("ggml_backend_vk_init(" << dev_num << ")");
  5436. ggml_backend_vk_context * ctx = &vk_instance.contexts[dev_num];
  5437. ggml_vk_init(ctx, dev_num);
  5438. ctx->name = GGML_VK_NAME + std::to_string(dev_num);
  5439. vk_instance.buffer_types[dev_num] = {
  5440. /* .iface = */ ggml_backend_vk_buffer_type_interface,
  5441. /* .context = */ new ggml_backend_vk_buffer_type_context{ ctx->name, ctx },
  5442. };
  5443. vk_instance.initialized[dev_num] = true;
  5444. ggml_backend_t vk_backend = new ggml_backend {
  5445. /* .guid = */ ggml_backend_vk_guid(),
  5446. /* .interface = */ ggml_backend_vk_interface,
  5447. /* .context = */ &vk_instance.contexts[ctx->idx],
  5448. };
  5449. vk_instance.backends[dev_num] = vk_backend;
  5450. return vk_backend;
  5451. }
  5452. GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend) {
  5453. return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
  5454. }
  5455. GGML_CALL int ggml_backend_vk_get_device_count() {
  5456. return ggml_vk_get_device_count();
  5457. }
  5458. GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
  5459. ggml_vk_get_device_description(device, description, description_size);
  5460. }
  5461. GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
  5462. GGML_ASSERT(device < (int) vk_instance.device_indices.size());
  5463. vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
  5464. vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();
  5465. for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
  5466. if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
  5467. *total = heap.size;
  5468. *free = heap.size;
  5469. break;
  5470. }
  5471. }
  5472. }
  5473. // backend registry
  5474. GGML_CALL static ggml_backend_t ggml_backend_reg_vk_init(const char * params, void * user_data) {
  5475. ggml_backend_t vk_backend = ggml_backend_vk_init((int) (intptr_t) user_data);
  5476. return vk_backend;
  5477. UNUSED(params);
  5478. }
  5479. extern "C" GGML_CALL int ggml_backend_vk_reg_devices();
  5480. GGML_CALL int ggml_backend_vk_reg_devices() {
  5481. ggml_vk_instance_init();
  5482. for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
  5483. char name[128];
  5484. snprintf(name, sizeof(name), "%s%ld", GGML_VK_NAME, i);
  5485. ggml_backend_register(name, ggml_backend_reg_vk_init, ggml_backend_vk_buffer_type(i), (void *) (intptr_t) i); // NOLINT
  5486. }
  5487. return vk_instance.device_indices.size();
  5488. }

// Extension availability
static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef GGML_VULKAN_VALIDATE
    // Check for the validation features extension, which is enabled together
    // with the validation layer when this returns true
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_EXT_validation_features", properties.extensionName) == 0) {
            return true;
        }
    }

    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_EXT_validation_features not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef __APPLE__
    // Check for the portability enumeration extension, needed for MoltenVK support
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
            return true;
        }
    }

    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

// checks
//
// Building with GGML_VULKAN_CHECK_RESULTS re-runs every op executed by the
// Vulkan backend on the CPU ggml backend as well, then compares the two
// results element-wise (see ggml_vk_check_results_0/1 below).

#ifdef GGML_VULKAN_CHECK_RESULTS

static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
    if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
        return;
    }
    for (int j = 0; j < level; j++) {
        std::cerr << " ";
    }
    std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << std::endl;

    done.push_back(tensor);

    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (tensor->src[i] != nullptr) {
            ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
        }
    }
}
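
// Prints a 10x10 window of values around (i0, i1) from the (i2, i3) slice of
// the tensor. i0/i1 are clamped so the window never starts below index 0;
// cells that fall outside the tensor extents are printed blank.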
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16 && tensor->type != GGML_TYPE_I32) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else if (tensor->type == GGML_TYPE_I32) {
                    val = *(const int32_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else {
                    GGML_ASSERT(false);
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
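
// Dumps a tensor for debugging: reads it back from the Vulkan buffer if
// necessary, prints its metadata and that of its sources, two sample areas
// (slices i2 = 0 and i2 = 1), and the origin of the result in the graph.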
static void ggml_vk_print_tensor(ggml_backend_vk_context * ctx, const ggml_tensor * tensor, const char * name) {
    void * tensor_data = tensor->data;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        const size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

        vk_buffer buffer_gpu = extra->buffer_gpu.lock();
        ggml_vk_buffer_read(ctx, buffer_gpu, extra->offset + tensor->view_offs, tensor_data, tensor_size);
    }

    std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
    std::cerr << "tensor=" << tensor << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
    if (tensor->src[0] != nullptr) {
        std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
    }
    if (tensor->src[1] != nullptr) {
        std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
    }
    std::cerr << std::endl << "Result (i2 = 0):" << std::endl;
    ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
    std::cerr << std::endl;
    std::cerr << std::endl << "Result (i2 = 1):" << std::endl;
    ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 1, 0);
    std::cerr << std::endl;
    std::vector<const ggml_tensor *> done;
    ggml_vk_print_graph_origin(tensor, done);

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        free(tensor_data);
    }
}
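
// CPU reference result captured by ggml_vk_check_results_0 and consumed by
// ggml_vk_check_results_1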
void * comp_result;
size_t comp_size;
size_t comp_nb[GGML_MAX_DIMS];
size_t check_counter = 0;
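
// Pre-compute hook: before the Vulkan backend runs `tensor`, clone the op and
// its sources into a throwaway CPU ggml context, compute the reference result
// there, and stash it in comp_result/comp_nb for the post-compute comparison.
// Checks can be skipped or targeted via the vk_skip_checks and vk_output_tensor
// globals defined earlier in this file.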
static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
    if (params->ith != 0) {
        return;
    }
    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE || tensor->op == GGML_OP_TRANSPOSE) {
        return;
    }

    check_counter++;
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_0(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];

    struct ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };

    struct ggml_context * ggml_ctx = ggml_init(iparams);

    struct ggml_tensor * src0_clone = nullptr;
    struct ggml_tensor * src1_clone = nullptr;
    struct ggml_tensor * src2_clone = nullptr;
    struct ggml_tensor * tensor_clone = nullptr;

    size_t src0_size;
    size_t src1_size;
    size_t src2_size;

    void * src0_buffer = nullptr;
    void * src1_buffer = nullptr;
    void * src2_buffer = nullptr;

    if (src0 != nullptr) {
        src0_clone = ggml_dup_tensor(ggml_ctx, src0);

        src0_size = ggml_nbytes(src0);

        src0_buffer = malloc(src0_size);
        src0_clone->data = src0_buffer;
        if (ggml_backend_buffer_is_host(src0->buffer)) {
            memcpy(src0_clone->data, src0->data, src0_size);
            memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src0->buffer)) {
            ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src0->extra;
            vk_buffer buffer_gpu = extra->buffer_gpu.lock();
            uint64_t offset = extra->offset + src0->view_offs;
            if (!ggml_is_contiguous(src0) && ggml_vk_dim01_contiguous(src0)) {
                // only contiguous in dims 0/1: copy slice-by-slice and re-pack the higher dims densely
                for (int i3 = 0; i3 < src0->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src0->ne[2]; i2++) {
                        const int idx = i3*src0->ne[2] + i2;
                        ggml_vk_buffer_read(ctx, buffer_gpu, offset + idx * src0->nb[2], ((char *)src0_clone->data + idx * src0_clone->nb[2]), src0->ne[1] * src0->nb[1]);
                    }
                }

                src0_clone->nb[0] = src0->nb[0];
                src0_clone->nb[1] = src0->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src0_clone->nb[i] = src0_clone->nb[i - 1]*src0_clone->ne[i - 1];
                }
            } else {
                if (offset + src0_size >= buffer_gpu->size) {
                    src0_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(ctx, buffer_gpu, offset, src0_clone->data, src0_size);
                memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ASSERT(false);
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(ctx, src0, "src0");
        }
    }

    if (src1 != nullptr) {
        src1_clone = ggml_dup_tensor(ggml_ctx, src1);

        src1_size = ggml_nbytes(src1);

        src1_buffer = malloc(src1_size);
        src1_clone->data = src1_buffer;
        if (ggml_backend_buffer_is_host(src1->buffer)) {
            memcpy(src1_clone->data, src1->data, src1_size);
            memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src1->buffer)) {
            ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src1->extra;
            vk_buffer buffer_gpu = extra->buffer_gpu.lock();
            uint64_t offset = extra->offset + src1->view_offs;
            if (!ggml_is_contiguous(src1) && ggml_vk_dim01_contiguous(src1)) {
                for (int i3 = 0; i3 < src1->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src1->ne[2]; i2++) {
                        const int idx = i3*src1->ne[2] + i2;
                        ggml_vk_buffer_read(ctx, buffer_gpu, offset + idx * src1->nb[2], ((char *)src1_clone->data + idx * src1_clone->nb[2]), src1->ne[1] * src1->nb[1]);
                    }
                }

                src1_clone->nb[0] = src1->nb[0];
                src1_clone->nb[1] = src1->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src1_clone->nb[i] = src1_clone->nb[i - 1]*src1_clone->ne[i - 1];
                }
            } else {
                if (offset + src1_size >= buffer_gpu->size) {
                    src1_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(ctx, buffer_gpu, offset, src1_clone->data, src1_size);
                memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ASSERT(false);
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(ctx, src1, "src1");
            std::cerr << "TENSOR CHECK: " << ggml_op_name(src1_clone->op) << " (check " << check_counter << ")" << std::endl;
            std::cerr << "src1_clone=" << src1_clone << " src1_clone->type: " << ggml_type_name(src1_clone->type) << " ne0=" << src1_clone->ne[0] << " nb0=" << src1_clone->nb[0] << " ne1=" << src1_clone->ne[1] << " nb1=" << src1_clone->nb[1] << " ne2=" << src1_clone->ne[2] << " nb2=" << src1_clone->nb[2] << " ne3=" << src1_clone->ne[3] << " nb3=" << src1_clone->nb[3] << std::endl;
            if (src1->src[0] != nullptr) {
                std::cerr << "src1->src[0]=" << src1->src[0] << " op=" << ggml_op_name(src1->src[0]->op) << " type=" << ggml_type_name(src1->src[0]->type) << " ne0=" << src1->src[0]->ne[0] << " nb0=" << src1->src[0]->nb[0] << " ne1=" << src1->src[0]->ne[1] << " nb1=" << src1->src[0]->nb[1] << " ne2=" << src1->src[0]->ne[2] << " nb2=" << src1->src[0]->nb[2] << " ne3=" << src1->src[0]->ne[3] << " nb3=" << src1->src[0]->nb[3] << std::endl;
            }
            if (src1->src[1] != nullptr) {
                std::cerr << "src1->src[1]=" << src1->src[1] << " op=" << ggml_op_name(src1->src[1]->op) << " type=" << ggml_type_name(src1->src[1]->type) << " ne0=" << src1->src[1]->ne[0] << " nb0=" << src1->src[1]->nb[0] << " ne1=" << src1->src[1]->ne[1] << " nb1=" << src1->src[1]->nb[1] << " ne2=" << src1->src[1]->ne[2] << " nb2=" << src1->src[1]->nb[2] << " ne3=" << src1->src[1]->ne[3] << " nb3=" << src1->src[1]->nb[3] << std::endl;
            }
            std::cerr << std::endl << "Result (i2 = 0):" << std::endl;
            ggml_vk_print_tensor_area(src1_clone, src1_clone->data, 5, 5, 0, 0);
            std::cerr << std::endl;
            std::cerr << std::endl << "Result (i2 = 1):" << std::endl;
            ggml_vk_print_tensor_area(src1_clone, src1_clone->data, 5, 5, 1, 0);
            std::cerr << std::endl;
            std::vector<const ggml_tensor *> done;
            ggml_vk_print_graph_origin(src1_clone, done);
        }
    }

    if (src2 != nullptr) {
        src2_clone = ggml_dup_tensor(ggml_ctx, src2);

        src2_size = ggml_nbytes(src2);

        src2_buffer = malloc(src2_size);
        src2_clone->data = src2_buffer;
        if (ggml_backend_buffer_is_host(src2->buffer)) {
            memcpy(src2_clone->data, src2->data, src2_size);
            memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src2->buffer)) {
            ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src2->extra;
            vk_buffer buffer_gpu = extra->buffer_gpu.lock();
            uint64_t offset = extra->offset + src2->view_offs;
            if (!ggml_is_contiguous(src2) && ggml_vk_dim01_contiguous(src2)) {
                for (int i3 = 0; i3 < src2->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src2->ne[2]; i2++) {
                        const int idx = i3*src2->ne[2] + i2;
                        ggml_vk_buffer_read(ctx, buffer_gpu, offset + idx * src2->nb[2], ((char *)src2_clone->data + idx * src2_clone->nb[2]), src2->ne[1] * src2->nb[1]);
                    }
                }

                src2_clone->nb[0] = src2->nb[0];
                src2_clone->nb[1] = src2->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src2_clone->nb[i] = src2_clone->nb[i - 1]*src2_clone->ne[i - 1];
                }
            } else {
                if (offset + src2_size >= buffer_gpu->size) {
                    src2_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(ctx, buffer_gpu, offset, src2_clone->data, src2_size);
                memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ASSERT(false);
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(ctx, src2, "src2");
            std::cerr << "TENSOR CHECK: " << ggml_op_name(src2_clone->op) << " (check " << check_counter << ")" << std::endl;
            std::cerr << "src2_clone=" << src2_clone << " src2_clone->type: " << ggml_type_name(src2_clone->type) << " ne0=" << src2_clone->ne[0] << " nb0=" << src2_clone->nb[0] << " ne1=" << src2_clone->ne[1] << " nb1=" << src2_clone->nb[1] << " ne2=" << src2_clone->ne[2] << " nb2=" << src2_clone->nb[2] << " ne3=" << src2_clone->ne[3] << " nb3=" << src2_clone->nb[3] << std::endl;
            if (src2->src[0] != nullptr) {
                std::cerr << "src2->src[0]=" << src2->src[0] << " op=" << ggml_op_name(src2->src[0]->op) << " type=" << ggml_type_name(src2->src[0]->type) << " ne0=" << src2->src[0]->ne[0] << " nb0=" << src2->src[0]->nb[0] << " ne1=" << src2->src[0]->ne[1] << " nb1=" << src2->src[0]->nb[1] << " ne2=" << src2->src[0]->ne[2] << " nb2=" << src2->src[0]->nb[2] << " ne3=" << src2->src[0]->ne[3] << " nb3=" << src2->src[0]->nb[3] << std::endl;
            }
            if (src2->src[1] != nullptr) {
                std::cerr << "src2->src[1]=" << src2->src[1] << " op=" << ggml_op_name(src2->src[1]->op) << " type=" << ggml_type_name(src2->src[1]->type) << " ne0=" << src2->src[1]->ne[0] << " nb0=" << src2->src[1]->nb[0] << " ne1=" << src2->src[1]->ne[1] << " nb1=" << src2->src[1]->nb[1] << " ne2=" << src2->src[1]->ne[2] << " nb2=" << src2->src[1]->nb[2] << " ne3=" << src2->src[1]->ne[3] << " nb3=" << src2->src[1]->nb[3] << std::endl;
            }
            std::cerr << std::endl << "Result (i2 = 0):" << std::endl;
            ggml_vk_print_tensor_area(src2_clone, src2_clone->data, 5, 5, 0, 0);
            std::cerr << std::endl;
            std::cerr << std::endl << "Result (i2 = 1):" << std::endl;
            ggml_vk_print_tensor_area(src2_clone, src2_clone->data, 5, 5, 1, 0);
            std::cerr << std::endl;
            std::vector<const ggml_tensor *> done;
            ggml_vk_print_graph_origin(src2_clone, done);
        }
    }

    if (tensor->op == GGML_OP_MUL_MAT) {
        tensor_clone = ggml_mul_mat(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
        tensor_clone = ggml_mul_mat_id(ggml_ctx, src0_clone, src1_clone, src2_clone);
    } else if (tensor->op == GGML_OP_MUL) {
        tensor_clone = ggml_mul(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_DIV) {
        tensor_clone = ggml_div(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_SCALE) {
        tensor_clone = ggml_scale(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0]);
    } else if (tensor->op == GGML_OP_SQR) {
        tensor_clone = ggml_sqr(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_CLAMP) {
        tensor_clone = ggml_clamp(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
    } else if (tensor->op == GGML_OP_ADD) {
        tensor_clone = ggml_add(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_NORM) {
        tensor_clone = ggml_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
    } else if (tensor->op == GGML_OP_RMS_NORM) {
        tensor_clone = ggml_rms_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
    } else if (tensor->op == GGML_OP_SOFT_MAX) {
        if (src1 != nullptr) {
            tensor_clone = ggml_soft_max_ext(ggml_ctx, src0_clone, src1_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
        } else {
            tensor_clone = ggml_soft_max(ggml_ctx, src0_clone);
        }
    } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
        tensor_clone = ggml_diag_mask_inf(ggml_ctx, src0_clone, *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_ROPE) {
        const int n_dims          = ((int32_t *) tensor->op_params)[1];
        const int mode            = ((int32_t *) tensor->op_params)[2];
        //const int n_ctx_ggml    = ((int32_t *) tensor->op_params)[3];
        const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
        float freq_base   = ((float *) tensor->op_params)[5];
        float freq_scale  = ((float *) tensor->op_params)[6];
        float ext_factor  = ((float *) tensor->op_params)[7];
        float attn_factor = ((float *) tensor->op_params)[8];
        float beta_fast   = ((float *) tensor->op_params)[9];
        float beta_slow   = ((float *) tensor->op_params)[10];
        tensor_clone = ggml_rope_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
    } else if (tensor->op == GGML_OP_UNARY) {
        switch (ggml_get_unary_op(tensor)) {
        case GGML_UNARY_OP_SILU:
            tensor_clone = ggml_silu(ggml_ctx, src0_clone);
            break;
        case GGML_UNARY_OP_GELU:
            tensor_clone = ggml_gelu(ggml_ctx, src0_clone);
            break;
        case GGML_UNARY_OP_RELU:
            tensor_clone = ggml_relu(ggml_ctx, src0_clone);
            break;
        default:
            std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
            GGML_ASSERT(false);
        }
    } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
        if (src1 == nullptr) {
            tensor_clone = ggml_dup(ggml_ctx, src0_clone);
            tensor_clone->type = tensor->type;
        } else {
            tensor_clone = ggml_cpy(ggml_ctx, src0_clone, src1_clone);
        }
    } else if (tensor->op == GGML_OP_CONT) {
        tensor_clone = ggml_cont_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_RESHAPE) {
        tensor_clone = ggml_reshape_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_VIEW) {
        tensor_clone = ggml_view_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
    } else if (tensor->op == GGML_OP_PERMUTE) {
        int32_t * params = (int32_t *)tensor->op_params;
        tensor_clone = ggml_permute(ggml_ctx, src0_clone, params[0], params[1], params[2], params[3]);
    } else if (tensor->op == GGML_OP_TRANSPOSE) {
        tensor_clone = ggml_transpose(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_GET_ROWS) {
        tensor_clone = ggml_get_rows(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_ARGSORT) {
        tensor_clone = ggml_argsort(ggml_ctx, src0_clone, (ggml_sort_order) *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_SUM_ROWS) {
        tensor_clone = ggml_sum_rows(ggml_ctx, src0_clone);
    } else {
        std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
        GGML_ASSERT(false);
    }

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_clone);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 8);

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        ggml_vk_print_tensor(ctx, tensor_clone, "tensor_clone");
    }

    comp_size = ggml_nbytes(tensor_clone);

    comp_result = malloc(comp_size);
    memcpy(comp_result, tensor_clone->data, comp_size);
    memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);

    if (src0 != nullptr) {
        free(src0_buffer);
    }
    if (src1 != nullptr) {
        free(src1_buffer);
    }
    if (src2 != nullptr) {
        free(src2_buffer);
    }

    ggml_free(ggml_ctx);
}
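
// Post-compute hook: after the Vulkan backend has run `tensor`, read the GPU
// result back and compare it element-wise against the CPU reference captured
// in ggml_vk_check_results_0. Mismatched NaN/Inf placement or an average
// absolute error above 0.05 aborts with a diagnostic dump.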
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
    if (params->ith != 0) {
        return;
    }
    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE || tensor->op == GGML_OP_TRANSPOSE) {
        return;
    }
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_1(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];

    void * tensor_data = tensor->data;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;

        vk_buffer buffer_gpu = extra->buffer_gpu.lock();
        if (extra->offset + tensor->view_offs + tensor_size >= buffer_gpu->size) {
            tensor_size = buffer_gpu->size - (extra->offset + tensor->view_offs);
        }

        ggml_vk_buffer_read(ctx, buffer_gpu, extra->offset + tensor->view_offs, tensor_data, tensor_size);
    }

    float first_error_result = -1.0f;
    float first_error_correct = -1.0f;
    std::array<int, 4> first_error = { -1, -1, -1, -1 };
    double avg_err = 0.0;
    size_t counter = 0;

    for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
        for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
            for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
                for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
                    const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
                    float correct = 0.0f;
                    float result  = 0.0f;

                    if (buffer_size_fit) {
                        if (tensor->type == GGML_TYPE_F32) {
                            correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result  = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else if (tensor->type == GGML_TYPE_F16) {
                            correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                            result  = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                        } else if (tensor->type == GGML_TYPE_I32) {
                            correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                            result  = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
                        } else {
                            std::cerr << "Results check not implemented for type " << ggml_type_name(tensor->type) << std::endl;
                        }
                    } else {
                        std::cerr << "Error: result index out of bounds of the reference buffer in " << ggml_op_name(tensor->op) << std::endl;
                        GGML_ASSERT(false);
                    }

                    if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
                        std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
                        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
                        if (src0 != nullptr) {
                            std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
                        }
                        if (src1 != nullptr) {
                            std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
                        }
                        if (src2 != nullptr) {
                            std::cerr << "src2=" << src2 << " src2->name=" << src2->name << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
                        }
                        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
                        std::cerr << std::endl << "Result:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
                        std::cerr << std::endl << "Correct:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
                        std::cerr << std::endl;
                        std::vector<const ggml_tensor *> done;
                        ggml_vk_print_graph_origin(tensor, done);
                        GGML_ASSERT(false);
                    }
                    if (first_error[0] == -1 && std::fabs(correct - result) > 0.1f) {
                        first_error[0] = i0;
                        first_error[1] = i1;
                        first_error[2] = i2;
                        first_error[3] = i3;
                        first_error_result = result;
                        first_error_correct = correct;
                    }

                    // Special case: skip infinite values to avoid a NaN in avg_err.
                    // NaNs also appear in results; if both values are NaN the error counts as 0.
                    if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
                        avg_err += std::fabs(correct - result);
                    }
                    counter++;
                }
            }
        }
    }

    avg_err /= counter;

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result (i2 = 0):" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
        std::cerr << std::endl << "Correct (i2 = 0):" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
        std::cerr << std::endl;
        std::cerr << std::endl << "Result (i2 = 1):" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 1, 0);
        std::cerr << std::endl << "Correct (i2 = 1):" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 1, 0);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
    }

    if (avg_err > 0.05 || std::isnan(avg_err)) {
        std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
        GGML_ASSERT(false);
    } else {
        std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " avg_err=" << avg_err << std::endl;
    }

    free(comp_result);
    comp_result = nullptr;
    comp_size = 0;

    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        free(tensor_data);
    }
}
#endif