// ggml-sycl.cpp
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
#include <algorithm>
#include <assert.h>
#include <atomic>
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <float.h>
#include <limits>
#include <stdint.h>
#include <stdio.h>
#include <vector>
#include <cmath>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <regex>

#include <sycl/sycl.hpp>
#include <sycl/half_type.hpp>

#include "ggml-sycl.h"
#include "ggml.h"
#include "ggml-backend-impl.h"
#include "ggml-sycl/backend.hpp"
/*
The following definitions are copied from the DPCT header files used by ggml-sycl.cpp.
*/
#include <oneapi/mkl.hpp>
#include <map>
#if defined(__linux__)
#include <sys/mman.h>
#include <unistd.h>
#include <sys/syscall.h>
#elif defined(_WIN64)
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#error "Only Windows and Linux are supported."
#endif
  62. #define DPCT_COMPATIBILITY_TEMP (900)
  63. #if defined(_MSC_VER)
  64. #define __dpct_align__(n) __declspec(align(n))
  65. #define __dpct_inline__ __forceinline
  66. #else
  67. #define __dpct_align__(n) __attribute__((aligned(n)))
  68. #define __dpct_inline__ __inline__ __attribute__((always_inline))
  69. #endif
  70. #if defined(_MSC_VER)
  71. #define __dpct_noinline__ __declspec(noinline)
  72. #else
  73. #define __dpct_noinline__ __attribute__((noinline))
  74. #endif
  75. bool ggml_sycl_loaded(void);
  76. void ggml_sycl_free_data(struct ggml_tensor * tensor);
  77. void ggml_sycl_assign_buffers(struct ggml_tensor * tensor);
  78. void ggml_sycl_assign_buffers_no_scratch(struct ggml_tensor * tensor);
  79. void ggml_sycl_assign_buffers_force_inplace(struct ggml_tensor * tensor);
  80. void ggml_sycl_assign_buffers_no_alloc(struct ggml_tensor * tensor);
  81. void ggml_sycl_copy_to_device(struct ggml_tensor * tensor);
  82. void ggml_sycl_set_main_device(int main_device);
  83. void ggml_sycl_set_mul_mat_q(bool mul_mat_q);
  84. void ggml_sycl_set_scratch_size(size_t scratch_size);
  85. void ggml_sycl_free_scratch(void);
  86. void ggml_sycl_get_device_description(int device, char * description, size_t description_size);
  87. bool ggml_backend_is_sycl(ggml_backend_t backend);
  88. int ggml_backend_sycl_get_device(ggml_backend_t backend);
  89. static bool ggml_backend_buffer_is_sycl_split(ggml_backend_buffer_t buffer);
  90. void dev2dev_memcpy(sycl::queue &q_dst, sycl::queue &q_src, void *ptr_dst,
  91. const void *ptr_src, size_t size) {
  92. char *host_buf = (char *)malloc(size);
  93. q_src.memcpy(host_buf, (const char *)ptr_src, size).wait();
  94. q_dst.memcpy((char *)ptr_dst, host_buf, size).wait();
  95. free(host_buf);
  96. }
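// Editorial note: dev2dev_memcpy above stages the copy through a temporary
// host buffer instead of issuing a direct device-to-device memcpy. A plausible
// reason is that the two queues may belong to different devices or contexts
// where a direct peer copy is not guaranteed to be supported; the host bounce
// buffer is the portable fallback, at the cost of two transfers and two
// blocking waits.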
static __dpct_inline__ int get_int_from_int8(const int8_t *x8, const int &i32) {
    const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment

    int x32 = 0;
    x32 |= x16[0] <<  0;
    x32 |= x16[1] << 16;

    return x32;
}

static __dpct_inline__ int get_int_from_uint8(const uint8_t *x8,
                                              const int &i32) {
    const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment

    int x32 = 0;
    x32 |= x16[0] <<  0;
    x32 |= x16[1] << 16;

    return x32;
}

static __dpct_inline__ int get_int_from_int8_aligned(const int8_t *x8,
                                                     const int &i32) {
    return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}

static __dpct_inline__ int get_int_from_uint8_aligned(const uint8_t *x8,
                                                      const int &i32) {
    return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
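// Worked example (editorial, little-endian assumed): for bytes x8 = {0x01,
// 0x02, 0x03, 0x04} and i32 = 0, the two 16-bit loads give x16[0] = 0x0201 and
// x16[1] = 0x0403, so x32 = 0x0201 | (0x0403 << 16) = 0x04030201 -- the same
// value a single aligned 32-bit load would produce. The *_aligned variants do
// exactly that one load when 4-byte alignment is known.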
template <typename T>
using to_t_sycl_t = void (*)(const void *__restrict__ x, T *__restrict__ y,
                             int k, queue_ptr stream);
typedef to_t_sycl_t<float> to_fp32_sycl_t;
typedef to_t_sycl_t<sycl::half> to_fp16_sycl_t;

typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v);
typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
typedef void (*ggml_sycl_func_t)(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
typedef void (*ggml_sycl_op_mul_mat_t)(
    ggml_backend_sycl_context & ctx,
    const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
    const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
    float *dst_dd_i, const int64_t row_low, const int64_t row_high,
    const int64_t src1_ncols, const int64_t src1_padded_row_size,
    const queue_ptr &stream);
typedef void (*ggml_sycl_op_flatten_t)(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                       const ggml_tensor *src1,
                                       ggml_tensor *dst, const float *src0_dd,
                                       const float *src1_dd, float *dst_dd,
                                       const queue_ptr &main_stream);

typedef float (*vec_dot_q_sycl_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs);
typedef void (*allocate_tiles_sycl_t)(int **x_ql, sycl::half2 **x_dm,
                                      int **x_qh, int **x_sc);
typedef void (*load_tiles_sycl_t)(const void *__restrict__ vx,
                                  int *__restrict__ x_ql,
                                  sycl::half2 *__restrict__ x_dm,
                                  int *__restrict__ x_qh,
                                  int *__restrict__ x_sc, const int &i_offset,
                                  const int &i_max, const int &k,
                                  const int &blocks_per_row);
typedef float (*vec_dot_q_mul_mat_sycl_t)(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ms,
    const int &i, const int &j, const int &k);
static __dpct_inline__ float warp_reduce_sum(float x,
                                             const sycl::nd_item<3> &item_ct1) {
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        /*
        DPCT1096:98: The right-most dimension of the work-group used in the SYCL
        kernel that calls this function may be less than "32". The function
        "dpct::permute_sub_group_by_xor" may return an unexpected result on the
        CPU device. Modify the size of the work-group to ensure that the value
        of the right-most dimension is a multiple of "32".
        */
        x += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), x, mask);
    }
    return x;
}
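// Editorial note: the loop above is an XOR "butterfly" reduction over a
// 32-wide sub-group. At mask = 16 each lane adds the value of the lane whose
// index differs in bit 4, at mask = 8 bit 3, and so on; after the five steps
// (16, 8, 4, 2, 1) every lane holds the sum of all 32 lanes. For example, with
// 32 lanes each holding 1.0f, the per-lane value goes 1 -> 2 -> 4 -> 8 -> 16 -> 32.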
static __dpct_inline__ sycl::float2
warp_reduce_sum(sycl::float2 a, const sycl::nd_item<3> &item_ct1) {
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        a.x() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.x(),
                                                mask);
        a.y() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.y(),
                                                mask);
    }
    return a;
}

static __dpct_inline__ float warp_reduce_max(float x,
                                             const sycl::nd_item<3> &item_ct1) {
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        /*
        DPCT1096:97: The right-most dimension of the work-group used in the SYCL
        kernel that calls this function may be less than "32". The function
        "dpct::permute_sub_group_by_xor" may return an unexpected result on the
        CPU device. Modify the size of the work-group to ensure that the value
        of the right-most dimension is a multiple of "32".
        */
        x = sycl::fmax(x, dpct::permute_sub_group_by_xor(
                              item_ct1.get_sub_group(), x, mask));
    }
    return x;
}

static __dpct_inline__ float op_repeat(const float a, const float b) {
    return b;
    GGML_UNUSED(a);
}

static __dpct_inline__ float op_add(const float a, const float b) {
    return a + b;
}

static __dpct_inline__ float op_mul(const float a, const float b) {
    return a * b;
}

static __dpct_inline__ float op_div(const float a, const float b) {
    return a / b;
}
template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst,
                        int ne0, int ne1, int ne2, int ne3,
                        int ne10, int ne11, int ne12, int ne13,
                        /*int s0, */ int s1, int s2, int s3,
                        /*int s10,*/ int s11, int s12, int s13,
                        const sycl::nd_item<3> &item_ct1) {
    const int i0s = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                    item_ct1.get_local_id(2);
    const int i1 = (item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1));
    const int i2 = (item_ct1.get_local_range(0) * item_ct1.get_group(0) +
                    item_ct1.get_local_id(0)) /
                   ne3;
    const int i3 = (item_ct1.get_local_range(0) * item_ct1.get_group(0) +
                    item_ct1.get_local_id(0)) %
                   ne3;

    if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
        return;
    }

    const int i11 = i1 % ne11;
    const int i12 = i2 % ne12;
    const int i13 = i3 % ne13;

    const size_t i_src0 = i3*s3 + i2*s2 + i1*s1;
    const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
    const size_t i_dst  = i_src0;

    const src0_t * src0_row = src0 + i_src0;
    const src1_t * src1_row = src1 + i_src1;
    dst_t * dst_row = dst + i_dst;

    for (int i0 = i0s; i0 < ne0;
         i0 += item_ct1.get_local_range(2) * item_ct1.get_group_range(2)) {
        const int i10 = i0 % ne10;
        dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
    }
}
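// Editorial note: the modulo indexing (i1 % ne11, i2 % ne12, i3 % ne13, and
// i0 % ne10 in the loop) is what implements numpy-style broadcasting: when a
// src1 dimension is 1, the modulo pins that index to 0 and the single src1
// element is reused across the whole corresponding dst dimension. E.g. adding
// a bias of shape [ne0, 1, 1, 1] to a tensor of shape [ne0, ne1, ne2, ne3]
// reads src1 with i11 = i12 = i13 = 0 for every output element.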
template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
static void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst,
                                int ne0, int ne1, int ne2, int ne3,
                                int ne10, int ne11, int ne12, int ne13,
                                /*int s0, */ int s1, int s2, int s3,
                                /*int s10,*/ int s11, int s12, int s13,
                                const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    const int i3 = i/(ne2*ne1*ne0);
    const int i2 = (i/(ne1*ne0)) % ne2;
    const int i1 = (i/ne0) % ne1;
    const int i0 = i % ne0;

    if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
        return;
    }

    const int i11 = i1 % ne11;
    const int i12 = i2 % ne12;
    const int i13 = i3 % ne13;

    const size_t i_src0 = i3*s3 + i2*s2 + i1*s1;
    const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
    const size_t i_dst  = i_src0;

    const src0_t * src0_row = src0 + i_src0;
    const src1_t * src1_row = src1 + i_src1;
    dst_t * dst_row = dst + i_dst;

    const int i10 = i0 % ne10;
    dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
}
static void acc_f32(const float * x, const float * y, float * dst, const int ne,
                    const int ne10, const int ne11, const int ne12,
                    const int nb1, const int nb2, int offset, const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);
    if (i >= ne) {
        return;
    }
    int src1_idx = i - offset;
    int oz = src1_idx / nb2;
    int oy = (src1_idx - (oz * nb2)) / nb1;
    int ox = src1_idx % nb1;
    if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
        dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11];
    } else {
        dst[i] = x[i];
    }
}
static void gelu_f32(const float * x, float * dst, const int k,
                     const sycl::nd_item<3> &item_ct1) {
    const float GELU_COEF_A    = 0.044715f;
    const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }

    float xi = x[i];
    dst[i] = 0.5f * xi *
             (1.0f +
              sycl::tanh(SQRT_2_OVER_PI * xi * (1.0f + GELU_COEF_A * xi * xi)));
}
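// Editorial note: this is the standard tanh approximation of GELU,
//   GELU(x) ~= 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) )),
// with sqrt(2/pi) ~= 0.7978845608. The kernel factors the inner term as
// sqrt(2/pi) * x * (1 + 0.044715 * x^2), which is algebraically identical.
// Quick check at x = 1: inner = 0.79788 * 1.044715 ~= 0.8336, tanh ~= 0.682,
// so GELU(1) ~= 0.5 * 1.682 ~= 0.841, matching the exact value x * Phi(1) ~= 0.8413.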
static void silu_f32(const float * x, float * dst, const int k,
                     const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
    dst[i] = x[i] / (1.0f + sycl::native::exp(-x[i]));
}

static void gelu_quick_f32(const float *x, float *dst, int k,
                           const sycl::nd_item<3> &item_ct1) {
    const float GELU_QUICK_COEF = -1.702f;
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);
    if (i >= k) {
        return;
    }
    dst[i] = x[i] * (1.0f / (1.0f + sycl::native::exp(GELU_QUICK_COEF * x[i])));
}

static void tanh_f32(const float *x, float *dst, int k,
                     const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);
    if (i >= k) {
        return;
    }
    dst[i] = sycl::tanh((float)(x[i]));
}

static void relu_f32(const float * x, float * dst, const int k,
                     const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
    dst[i] = sycl::fmax((float)(x[i]), (float)0);
}

static void hardsigmoid_f32(const float * x, float * dst, const int k,
                            const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
    dst[i] = sycl::fmin(1.0f, sycl::fmax(0.0f, (x[i] + 3.0f) / 6.0f));
}

static void hardswish_f32(const float * x, float * dst, const int k,
                          const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
    dst[i] = x[i] * sycl::fmin(1.0f, sycl::fmax(0.0f, (x[i] + 3.0f) / 6.0f));
}

static void leaky_relu_f32(const float *x, float *dst, const int k, const float negative_slope,
                           const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);
    if (i >= k) {
        return;
    }
    dst[i] = sycl::fmax((float)(x[i]), (float)0) +
             sycl::fmin((float)(x[i]), 0.0f) * negative_slope;
}

static void sqr_f32(const float * x, float * dst, const int k,
                    const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }
    dst[i] = x[i] * x[i];
}
static void norm_f32(const float * x, float * dst, const int ncols, const float eps,
                     const sycl::nd_item<3> &item_ct1, sycl::float2 *s_sum, int block_size) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);
    const int tid = item_ct1.get_local_id(2);

    sycl::float2 mean_var = sycl::float2(0.f, 0.f);

    for (int col = tid; col < ncols; col += block_size) {
        const float xi = x[row*ncols + col];
        mean_var.x() += xi;
        mean_var.y() += xi * xi;
    }

    // sum up partial sums
    mean_var = warp_reduce_sum(mean_var, item_ct1);
    if (block_size > WARP_SIZE) {
        int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
        int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
        if (lane_id == 0) {
            s_sum[warp_id] = mean_var;
        }
        /*
        DPCT1118:0: SYCL group functions and algorithms must be encountered in
        converged control flow. You may need to adjust the code.
        */
        item_ct1.barrier(sycl::access::fence_space::local_space);
        mean_var = s_sum[lane_id];
        mean_var = warp_reduce_sum(mean_var, item_ct1);
    }

    const float mean = mean_var.x() / ncols;
    const float var = mean_var.y() / ncols - mean * mean;
    const float inv_std = sycl::rsqrt(var + eps);

    for (int col = tid; col < ncols; col += block_size) {
        dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_std;
    }
}
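// Editorial note: the kernel accumulates sum(x) and sum(x^2) in one pass and
// uses the identity Var(x) = E[x^2] - (E[x])^2. Worked example for a row
// {1, 2, 3, 4}: E[x] = 2.5, E[x^2] = 7.5, so Var = 7.5 - 6.25 = 1.25 and each
// element is normalized as (x - 2.5) * rsqrt(1.25 + eps). The single-pass
// formula can lose precision when the mean is large relative to the spread
// (the two terms nearly cancel); eps also guards against a zero or slightly
// negative computed variance.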
static void concat_f32(const float *x, const float *y, float *dst, const int ne0, const int ne02,
                       const sycl::nd_item<3> &item_ct1) {
    int nidx = item_ct1.get_local_id(2) +
               item_ct1.get_group(2) * item_ct1.get_local_range(2);
    if (nidx >= ne0) {
        return;
    }
    // operation
    int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
                     item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
    if (item_ct1.get_group(0) < ne02) { // src0
        int offset_src =
            nidx + item_ct1.get_group(1) * ne0 +
            item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
        dst[offset_dst] = x[offset_src];
    } else {
        int offset_src =
            nidx + item_ct1.get_group(1) * ne0 +
            (item_ct1.get_group(0) - ne02) * ne0 * item_ct1.get_group_range(1);
        dst[offset_dst] = y[offset_src];
    }
}
static void upscale_f32(const float *x, float *dst, const int nb00, const int nb01,
                        const int nb02, const int nb03, const int ne10, const int ne11,
                        const int ne12, const int ne13, const float sf0, const float sf1,
                        const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) {
    int index = item_ct1.get_local_id(0) +
                item_ct1.get_group(0) * item_ct1.get_local_range(0);
    if (index >= ne10 * ne11 * ne12 * ne13) {
        return;
    }
    // operation
    int i10 = index % ne10;
    int i11 = (index / ne10) % ne11;
    int i12 = (index / (ne10 * ne11)) % ne12;
    int i13 = (index / (ne10 * ne11 * ne12)) % ne13;

    int i00 = i10 / sf0;
    int i01 = i11 / sf1;
    int i02 = i12 / sf2;
    int i03 = i13 / sf3;

    dst[index] = *(float *)((char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
}
static void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const int ne01, const int ne02,
                    const sycl::nd_item<3> &item_ct1) {
    int nidx = item_ct1.get_local_id(2) +
               item_ct1.get_group(2) * item_ct1.get_local_range(2);
    if (nidx >= ne0) {
        return;
    }

    // operation
    int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
                     item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
    if (nidx < ne00 && item_ct1.get_group(1) < ne01 &&
        item_ct1.get_group(0) < ne02) {
        int offset_src = nidx + item_ct1.get_group(1) * ne00 +
                         item_ct1.get_group(0) * ne00 * ne01;
        dst[offset_dst] = x[offset_src];
    } else {
        dst[offset_dst] = 0.0f;
    }
}
static void group_norm_f32(const float * x, float * dst, const int group_size, const int ne_elements, const float eps,
                           const sycl::nd_item<3> &item_ct1, float *s_sum, int block_size) {
    int start = item_ct1.get_group(2) * group_size;
    int end = start + group_size;

    start += item_ct1.get_local_id(2);

    if (end >= ne_elements) {
        end = ne_elements;
    }

    float tmp = 0.0f; // partial sum for thread in warp

    for (int j = start; j < end; j += block_size) {
        tmp += x[j];
    }

    tmp = warp_reduce_sum(tmp, item_ct1);
    if (block_size > WARP_SIZE) {
        int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
        int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
        if (lane_id == 0) {
            s_sum[warp_id] = tmp;
        }
        /*
        DPCT1118:1: SYCL group functions and algorithms must be encountered in
        converged control flow. You may need to adjust the code.
        */
        /*
        DPCT1065:54: Consider replacing sycl::nd_item::barrier() with
        sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
        better performance if there is no access to global memory.
        */
        item_ct1.barrier();
        tmp = s_sum[lane_id];
        tmp = warp_reduce_sum(tmp, item_ct1);
    }

    float mean = tmp / group_size;
    tmp = 0.0f;

    for (int j = start; j < end; j += block_size) {
        float xi = x[j] - mean;
        dst[j] = xi;
        tmp += xi * xi;
    }

    tmp = warp_reduce_sum(tmp, item_ct1);
    if (block_size > WARP_SIZE) {
        int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
        int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
        if (lane_id == 0) {
            s_sum[warp_id] = tmp;
        }
        /*
        DPCT1118:2: SYCL group functions and algorithms must be encountered in
        converged control flow. You may need to adjust the code.
        */
        /*
        DPCT1065:55: Consider replacing sycl::nd_item::barrier() with
        sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
        better performance if there is no access to global memory.
        */
        item_ct1.barrier();
        tmp = s_sum[lane_id];
        tmp = warp_reduce_sum(tmp, item_ct1);
    }

    float variance = tmp / group_size;
    float scale = sycl::rsqrt(variance + eps);
    for (int j = start; j < end; j += block_size) {
        dst[j] *= scale;
    }
}
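// Editorial note: unlike norm_f32 above, group_norm_f32 takes two passes per
// group: first it reduces sum(x) to get the mean, then it reduces
// sum((x - mean)^2) to get the variance, staging the centered values in dst so
// the final loop only has to multiply by rsqrt(variance + eps). The two-pass
// form avoids the E[x^2] - E[x]^2 cancellation at the cost of reading the
// group twice.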
static void rms_norm_f32(const float * x, float * dst, const int ncols, const float eps,
                         const sycl::nd_item<3> &item_ct1, float *s_sum, int block_size) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);
    const int tid = item_ct1.get_local_id(2);

    float tmp = 0.0f; // partial sum for thread in warp

    for (int col = tid; col < ncols; col += block_size) {
        const float xi = x[row*ncols + col];
        tmp += xi * xi;
    }

    // sum up partial sums
    tmp = warp_reduce_sum(tmp, item_ct1);
    if (block_size > WARP_SIZE) {
        int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
        int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
        if (lane_id == 0) {
            s_sum[warp_id] = tmp;
        }
        /*
        DPCT1118:3: SYCL group functions and algorithms must be encountered in
        converged control flow. You may need to adjust the code.
        */
        item_ct1.barrier(sycl::access::fence_space::local_space);
        tmp = s_sum[lane_id];
        tmp = warp_reduce_sum(tmp, item_ct1);
    }

    const float mean = tmp / ncols;
    const float scale = sycl::rsqrt(mean + eps);

    for (int col = tid; col < ncols; col += block_size) {
        dst[row*ncols + col] = scale * x[row*ncols + col];
    }
}
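// Editorial note: RMSNorm only rescales, it does not center:
//   y_i = x_i / sqrt( mean(x^2) + eps ).
// Worked example for a row {3, 4}: mean(x^2) = (9 + 16) / 2 = 12.5, so with
// eps ~ 0 the scale is rsqrt(12.5) ~= 0.2828 and the row becomes
// {0.849, 1.131}.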
static __dpct_inline__ void dequantize_q4_0(const void *vx, const int ib,
                                            const int iqs, dfloat2 &v) {
    const block_q4_0 * x = (const block_q4_0 *) vx;

    const dfloat d = x[ib].d;

    const int vui = x[ib].qs[iqs];

    v.x() = vui & 0xF;
    v.y() = vui >> 4;

#ifdef GGML_SYCL_F16
    // v = v - {8.0f, 8.0f};
    // v = v * {d, d};
    v.s0() = (v.s0() - 8.0f) * d;
    v.s1() = (v.s1() - 8.0f) * d;
#else
    v.x() = (v.x() - 8.0f) * d;
    v.y() = (v.y() - 8.0f) * d;
#endif // GGML_SYCL_F16
}
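// Editorial worked example for q4_0: each byte packs two 4-bit codes in
// [0, 15], stored with an implicit offset of 8 and a per-block scale d.
// If d = 0.5 and qs[iqs] = 0xA3, the low nibble 3 dequantizes to
// (3 - 8) * 0.5 = -2.5 and the high nibble 10 to (10 - 8) * 0.5 = 1.0.
// q4_1 below replaces the fixed -8 offset with a stored per-block minimum m.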
static __dpct_inline__ void dequantize_q4_1(const void *vx, const int ib,
                                            const int iqs, dfloat2 &v) {
    const block_q4_1 * x = (const block_q4_1 *) vx;

    const dfloat d = x[ib].dm[0];
    const dfloat m = x[ib].dm[1];

    const int vui = x[ib].qs[iqs];

    v.x() = vui & 0xF;
    v.y() = vui >> 4;

#ifdef GGML_SYCL_F16
    // v = v * {d, d};
    // v = v + {m, m};
    v.s0() = (v.s0() * d) + m;
    v.s1() = (v.s1() * d) + m;
#else
    v.x() = (v.x() * d) + m;
    v.y() = (v.y() * d) + m;
#endif // GGML_SYCL_F16
}

static __dpct_inline__ void dequantize_q5_0(const void *vx, const int ib,
                                            const int iqs, dfloat2 &v) {
    const block_q5_0 * x = (const block_q5_0 *) vx;

    const dfloat d = x[ib].d;

    uint32_t qh;
    memcpy(&qh, x[ib].qh, sizeof(qh));

    const int xh_0 = ((qh >> (iqs +  0)) << 4) & 0x10;
    const int xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    v.x() = ((x[ib].qs[iqs] & 0xf) | xh_0);
    v.y() = ((x[ib].qs[iqs] >>  4) | xh_1);

#ifdef GGML_SYCL_F16
    // v = v - {16.0f, 16.0f};
    // v = v * {d, d};
    v.s0() = (v.s0() - 16.0f) * d;
    v.s1() = (v.s1() - 16.0f) * d;
#else
    v.x() = (v.x() - 16.0f) * d;
    v.y() = (v.y() - 16.0f) * d;
#endif // GGML_SYCL_F16
}

static __dpct_inline__ void dequantize_q5_1(const void *vx, const int ib,
                                            const int iqs, dfloat2 &v) {
    const block_q5_1 * x = (const block_q5_1 *) vx;

    const dfloat d = x[ib].dm[0];
    const dfloat m = x[ib].dm[1];

    uint32_t qh;
    memcpy(&qh, x[ib].qh, sizeof(qh));

    const int xh_0 = ((qh >> (iqs +  0)) << 4) & 0x10;
    const int xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    v.x() = ((x[ib].qs[iqs] & 0xf) | xh_0);
    v.y() = ((x[ib].qs[iqs] >>  4) | xh_1);

#ifdef GGML_SYCL_F16
    // v = v * {d, d};
    // v = v + {m, m};
    v.s0() = (v.s0() * d) + m;
    v.s1() = (v.s1() * d) + m;
#else
    v.x() = (v.x() * d) + m;
    v.y() = (v.y() * d) + m;
#endif // GGML_SYCL_F16
}

static __dpct_inline__ void dequantize_q8_0(const void *vx, const int ib,
                                            const int iqs, dfloat2 &v) {
    const block_q8_0 * x = (const block_q8_0 *) vx;

    const dfloat d = x[ib].d;

    v.x() = x[ib].qs[iqs + 0];
    v.y() = x[ib].qs[iqs + 1];

#ifdef GGML_SYCL_F16
    // v = v * {d, d};
    v.s0() *= d;
    v.s1() *= d;
#else
    v.x() *= d;
    v.y() *= d;
#endif // GGML_SYCL_F16
}
template<typename dst_t>
static void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32,
                                  const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);

    // assume 32 threads
    const int tid = item_ct1.get_local_id(2);
    const int il  = tid/8;
    const int ir  = tid%8;
    const int ib  = 8*i + ir;
    if (ib >= nb32) {
        return;
    }

    dst_t * y = yy + 256*i + 32*ir + 4*il;

    const block_q4_0 * x = (const block_q4_0 *)vx + ib;
    const float d = sycl::vec<sycl::half, 1>(x->d)
                        .convert<float, sycl::rounding_mode::automatic>()[0];
    const float dm = -8*d;

    const uint8_t * q = x->qs + 4*il;

    for (int l = 0; l < 4; ++l) {
        y[l+ 0] = d * (q[l] & 0xF) + dm;
        y[l+16] = d * (q[l] >>  4) + dm;
    }
}

template<typename dst_t>
static void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32,
                                  const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);

    // assume 32 threads
    const int tid = item_ct1.get_local_id(2);
    const int il  = tid/8;
    const int ir  = tid%8;
    const int ib  = 8*i + ir;
    if (ib >= nb32) {
        return;
    }

    dst_t * y = yy + 256*i + 32*ir + 4*il;

    const block_q4_1 * x = (const block_q4_1 *)vx + ib;
    const sycl::float2 d =
        x->dm.convert<float, sycl::rounding_mode::automatic>();

    const uint8_t * q = x->qs + 4*il;

    for (int l = 0; l < 4; ++l) {
        y[l +  0] = d.x() * (q[l] & 0xF) + d.y();
        y[l + 16] = d.x() * (q[l] >>  4) + d.y();
    }
}
//================================== k-quants

template<typename dst_t>
static void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                  const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);
    const block_q2_K * x = (const block_q2_K *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int n   = tid/32;
    const int l   = tid - 32*n;
    const int is  = 8*n + l/16;

    const uint8_t q = x[i].qs[32*n + l];
    dst_t * y = yy + i*QK_K + 128*n;

    float dall = x[i].dm[0];
    float dmin = x[i].dm[1];
    y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
    y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
    y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
    y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
}

template<typename dst_t>
static void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                  const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);
    const block_q3_K * x = (const block_q3_K *) vx;

    const int r   = item_ct1.get_local_id(2) / 4;
    const int tid = r/2;
    const int is0 = r%2;
    const int l0  = 16 * is0 + 4 * (item_ct1.get_local_id(2) % 4);
    const int n   = tid / 4;
    const int j   = tid - 4*n;

    uint8_t m = 1 << (4*n + j);
    int is = 8*n + 2*j + is0;
    int shift = 2*j;

    int8_t us = is <  4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
                is <  8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
                is < 12 ? (x[i].scales[is-8] >>  4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
                          (x[i].scales[is-8] >>  4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
    float d_all = x[i].d;
    float dl = d_all * (us - 32);

    dst_t * y = yy + i*QK_K + 128*n + 32*j;
    const uint8_t * q = x[i].qs + 32*n;
    const uint8_t * hm = x[i].hmask;

    for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
}
static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
    if (j < 4) {
        d = q[j] & 63; m = q[j + 4] & 63;
    } else {
        d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
    }
}
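// Editorial note: get_scale_min_k4 unpacks the 12-byte scales array used by
// the q4_K/q5_K super-blocks, which stores eight 6-bit scales d and eight
// 6-bit minimums m. Entries 0..3 live in the low 6 bits of bytes j (scale)
// and j+4 (min); entries 4..7 are reassembled from a nibble of byte j+4 plus
// the two spare high bits of byte j-4 (for d) and byte j (for m) -- the bits
// masked off when reading the first four entries.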
template<typename dst_t>
static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                  const sycl::nd_item<3> &item_ct1) {
    const block_q4_K * x = (const block_q4_K *) vx;

    const int i = item_ct1.get_group(2);

    // assume 32 threads
    const int tid = item_ct1.get_local_id(2);
    const int il  = tid/8;
    const int ir  = tid%8;
    const int is  = 2*il;
    const int n   = 4;

    dst_t * y = yy + i*QK_K + 64*il + n*ir;

    const float dall = x[i].dm[0];
    const float dmin = x[i].dm[1];

    const uint8_t * q = x[i].qs + 32*il + n*ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m;
    for (int l = 0; l < n; ++l) {
        y[l +  0] = d1 * (q[l] & 0xF) - m1;
        y[l + 32] = d2 * (q[l] >>  4) - m2;
    }
}

template<typename dst_t>
static void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                  const sycl::nd_item<3> &item_ct1) {
    const block_q5_K * x = (const block_q5_K *) vx;

    const int i = item_ct1.get_group(2);

    // assume 64 threads - this is very slightly better than the one below
    const int tid = item_ct1.get_local_id(2);
    const int il  = tid/16;   // il is in 0...3
    const int ir  = tid%16;   // ir is in 0...15
    const int is  = 2*il;     // is is in 0...6

    dst_t * y = yy + i*QK_K + 64*il + 2*ir;

    const float dall = x[i].dm[0];
    const float dmin = x[i].dm[1];

    const uint8_t * ql = x[i].qs + 32*il + 2*ir;
    const uint8_t * qh = x[i].qh + 2*ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m;

    uint8_t hm = 1 << (2*il);
    y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
    y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
    hm <<= 1;
    y[32] = d2 * ((ql[ 0] >>  4) + (qh[ 0] & hm ? 16 : 0)) - m2;
    y[33] = d2 * ((ql[ 1] >>  4) + (qh[ 1] & hm ? 16 : 0)) - m2;
}

template<typename dst_t>
static void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                  const sycl::nd_item<3> &item_ct1) {
    const block_q6_K * x = (const block_q6_K *) vx;

    const int i = item_ct1.get_group(2);

    // assume 64 threads - this is very slightly better than the one below
    const int tid = item_ct1.get_local_id(2);
    const int ip  = tid/32;       // ip is 0 or 1
    const int il  = tid - 32*ip;  // 0...31
    const int is  = 8*ip + il/16;

    dst_t * y = yy + i*QK_K + 128*ip + il;

    const float d = x[i].d;

    const uint8_t * ql = x[i].ql + 64*ip + il;
    const uint8_t   qh = x[i].qh[32*ip + il];
    const int8_t  * sc = x[i].scales + is;

    y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
    y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
    y[64] = d * sc[4] * ((int8_t)((ql[ 0] >>  4) | (((qh >> 4) & 3) << 4)) - 32);
    y[96] = d * sc[6] * ((int8_t)((ql[32] >>  4) | (((qh >> 6) & 3) << 4)) - 32);
}
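// Editorial note on the q6_K layout: each 6-bit code is split into a 4-bit
// low part (two codes per byte in ql) and a 2-bit high part (four codes per
// byte in qh). Reconstruction is value = (low4 | (high2 << 4)) - 32, then
// multiplied by the 8-bit sub-block scale sc and the super-block scale d.
// E.g. low4 = 0xF and high2 = 3 give code 63, which dequantizes to
// d * sc * (63 - 32) = 31 * d * sc.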
template<typename dst_t>
static void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                     const sycl::nd_item<3> &item_ct1,
                                     const uint64_t *iq2xxs_grid_ptr,
                                     const uint8_t *ksigns_iq2xs_ptr,
                                     const uint8_t *kmask_iq2xs_ptr) {
    const int i = item_ct1.get_group(2);
    const block_iq2_xxs * x = (const block_iq2_xxs *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint16_t * q2 = x[i].qs + 4*ib;
    const uint8_t  * aux8 = (const uint8_t *)q2;
    const uint8_t  * grid = (const uint8_t *)(iq2xxs_grid_ptr + aux8[il]);
    const uint32_t aux32 = q2[2] | (q2[3] << 16);
    const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f;
    const uint8_t signs = ksigns_iq2xs_ptr[(aux32 >> 7*il) & 127];
    for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs_ptr[j] ? -1.f : 1.f);
}

template<typename dst_t>
static void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                    const sycl::nd_item<3> &item_ct1,
                                    const uint64_t *iq2xs_grid,
                                    const uint8_t *ksigns_iq2xs,
                                    const uint8_t *kmask_iq2xs) {
    const int i = item_ct1.get_group(2);
    const block_iq2_xs * x = (const block_iq2_xs *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint16_t * q2 = x[i].qs + 4*ib;
    const uint8_t  * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511));
    const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
    const uint8_t signs = ksigns_iq2xs[q2[il] >> 9];
    for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
}

template <typename dst_t>
__dpct_inline__ static void
dequantize_block_iq2_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
                       const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);
    const block_iq2_s * x = (const block_iq2_s *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300)));
    const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
    const uint8_t signs = x[i].qs[QK_K/8+4*ib+il];
#pragma unroll
    for (int j = 0; j < 8; ++j) {
        y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
    }
}

template<typename dst_t>
static void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                     const sycl::nd_item<3> &item_ct1,
                                     const uint32_t *iq3xxs_grid,
                                     const uint8_t *ksigns_iq2xs,
                                     const uint8_t *kmask_iq2xs) {
    const int i = item_ct1.get_group(2);
    const block_iq3_xxs * x = (const block_iq3_xxs *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint8_t  * q3 = x[i].qs + 8*ib;
    const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib;
    const uint8_t  * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]);
    const uint8_t  * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]);
    const uint32_t aux32 = gas[0] | (gas[1] << 16);
    const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.5f;
    const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127];
    for (int j = 0; j < 4; ++j) {
        y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
        y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
    }
}
template <typename dst_t>
__dpct_inline__ static void
dequantize_block_iq3_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
                       const sycl::nd_item<3> &item_ct1,
                       const uint8_t *kmask_iq2xs, const uint32_t *iq3s_grid) {
    const int i = item_ct1.get_group(2);
    const block_iq3_s * x = (const block_iq3_s *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint8_t * qs = x[i].qs + 8*ib;
    const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256)));
    const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256)));
    const float d = (float)x[i].d * (1 + 2*((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf));
    const uint8_t signs = x[i].signs[4*ib + il];
#pragma unroll
    for (int j = 0; j < 4; ++j) {
        y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
        y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
    }
}

template <typename dst_t>
__dpct_inline__ static void
dequantize_block_iq1_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
                       const sycl::nd_item<3> &item_ct1,
                       const uint32_t *iq1s_grid_gpu) {
    const int i = item_ct1.get_group(2);
    const block_iq1_s * x = (const block_iq1_s *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA;
    const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1);
    uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32;
    grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[ib] >> 3*il) & 7) << 8)];
    grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
    grid32[0] &= 0x0f0f0f0f;
#pragma unroll
    for (int j = 0; j < 8; ++j) {
        y[j] = d * (q[j] + delta);
    }
}

template <typename dst_t>
__dpct_inline__ static void
dequantize_block_iq1_m(const void *__restrict__ vx, dst_t *__restrict__ yy,
                       const sycl::nd_item<3> &item_ct1,
                       const uint32_t *iq1s_grid_gpu) {
    const int i = item_ct1.get_group(2);
    const block_iq1_m * x = (const block_iq1_m *) vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint16_t * sc = (const uint16_t *)x[i].scales;
    iq1m_scale_t scale;
    scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
    const int ib16 = 2*ib + il/2; // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4);
    const float d = (float)scale.f16 * (2*((sc[ib16/4] >> 3*(ib16%4)) & 0x7) + 1);
    const float delta = x[i].qh[2*ib+il/2] & (0x08 << 4*(il%2)) ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA;
    uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32;
    grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[2*ib+il/2] >> 4*(il%2)) & 7) << 8)];
    grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
    grid32[0] &= 0x0f0f0f0f;
#pragma unroll
    for (int j = 0; j < 8; ++j) {
        y[j] = d * (q[j] + delta);
    }
}
template <typename dst_t>
__dpct_inline__ static void
dequantize_block_iq4_nl(const void *__restrict__ vx, dst_t *__restrict__ yy,
                        const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);
    const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL);

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 4*il;
    const uint8_t * q4 = x[ib].qs + 4*il;
    const float d = (float)x[ib].d;
#pragma unroll
    for (int j = 0; j < 4; ++j) {
        y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf];
        y[j+16] = d * kvalues_iq4nl[q4[j] >>  4];
    }
}

template <typename dst_t>
__dpct_inline__ static void
dequantize_block_iq4_xs(const void *__restrict__ vx, dst_t *__restrict__ yy,
                        const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_group(2);
    const block_iq4_xs * x = (const block_iq4_xs *)vx;

    const int tid = item_ct1.get_local_id(2);
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 4*il;
    const uint8_t * q4 = x[i].qs + 16*ib + 4*il;
    const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32);
#pragma unroll
    for (int j = 0; j < 4; ++j) {
        y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf];
        y[j+16] = d * kvalues_iq4nl[q4[j] >>  4];
    }
}
/*
DPCT1110:4: The total declared local variable size in device function
dequantize_mul_mat_vec_q2_k exceeds 128 bytes and may cause high register
pressure. Consult with your hardware vendor to find the total register size
available and adjust the code, or use smaller sub-group size to avoid high
register pressure.
*/
static void dequantize_mul_mat_vec_q2_k(const void *__restrict__ vx,
                                        const float *__restrict__ yy,
                                        float *__restrict__ dst,
                                        const int ncols, int nrows,
                                        const sycl::nd_item<3> &item_ct1) {

    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);
    if (row > nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q2_K * x = (const block_q2_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

    const int tid =
        item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15
    const int ix =
        item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1

    const int step = 16/K_QUANTS_PER_ITERATION;

    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0...15 or 0...7

    const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int s_offset = 8*im;
    const int y_offset = 128*im + l0;

    uint32_t aux[4];
    const uint8_t * d = (const uint8_t *)aux;
    const uint8_t * m = (const uint8_t *)(aux + 2);

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;

        const float dall = x[i].dm[0];
        const float dmin = x[i].dm[1];

        const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
        aux[0] = a[0] & 0x0f0f0f0f;
        aux[1] = a[1] & 0x0f0f0f0f;
        aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
        aux[3] = (a[1] >> 4) & 0x0f0f0f0f;

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            sum1 += y[l+  0] * d[0] * ((q[l+ 0] >> 0) & 3)
                  + y[l+ 32] * d[2] * ((q[l+ 0] >> 2) & 3)
                  + y[l+ 64] * d[4] * ((q[l+ 0] >> 4) & 3)
                  + y[l+ 96] * d[6] * ((q[l+ 0] >> 6) & 3)
                  + y[l+ 16] * d[1] * ((q[l+16] >> 0) & 3)
                  + y[l+ 48] * d[3] * ((q[l+16] >> 2) & 3)
                  + y[l+ 80] * d[5] * ((q[l+16] >> 4) & 3)
                  + y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
            sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[l+ 96] * m[6]
                  + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];
        }
        tmp += dall * sum1 - dmin * sum2;
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}
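// Editorial note: in the mul-mat-vec kernels above and below,
// K_QUANTS_PER_ITERATION (1 or 2) splits each 32-thread sub-group between
// parallelism across super-blocks (ix strides the block loop) and work per
// block (the inner l loop). Each thread accumulates a private partial dot
// product tmp, the XOR butterfly collapses the 32 partials, and lane 0 writes
// dst[row] -- one output element per sub-group.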
/*
DPCT1110:5: The total declared local variable size in device function
dequantize_mul_mat_vec_q3_k exceeds 128 bytes and may cause high register
pressure. Consult with your hardware vendor to find the total register size
available and adjust the code, or use smaller sub-group size to avoid high
register pressure.
*/
static void dequantize_mul_mat_vec_q3_k(const void *__restrict__ vx,
                                        const float *__restrict__ yy,
                                        float *__restrict__ dst,
                                        const int ncols, int nrows,
                                        const sycl::nd_item<3> &item_ct1) {

    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);
    if (row > nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q3_K * x = (const block_q3_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

    const uint16_t kmask1 = 0x0303;
    const uint16_t kmask2 = 0x0f0f;

    const int tid =
        item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15
    const int ix =
        item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1

    const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop
    const int step = 16/K_QUANTS_PER_ITERATION;
    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0...15 or 0...7

    const uint8_t m = 1 << (4*im);

    const int l0 = n*in; // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int y_offset = 128*im + l0;

    uint16_t utmp[4];
    const int8_t * s = (const int8_t *)utmp;

    const uint16_t s_shift = 4*im;

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;
        const uint8_t * h = x[i].hmask + l0;

        const uint16_t * a = (const uint16_t *)x[i].scales;
        utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
        utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
        utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
        utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);

        const float d = x[i].d;

        float sum = 0;
        for (int l = 0; l < n; ++l) {
            sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
                 + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
                 + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
                 + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
            sum += y[l+ 16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
                 + y[l+ 48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
                 + y[l+ 80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
                 + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
        }
        tmp += d * sum;
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}
/*
DPCT1110:6: The total declared local variable size in device function
dequantize_mul_mat_vec_q4_k exceeds 128 bytes and may cause high register
pressure. Consult with your hardware vendor to find the total register size
available and adjust the code, or use smaller sub-group size to avoid high
register pressure.
*/
static void dequantize_mul_mat_vec_q4_k(const void *__restrict__ vx,
                                        const float *__restrict__ yy,
                                        float *__restrict__ dst,
                                        const int ncols, int nrows,
                                        const sycl::nd_item<3> &item_ct1) {

    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);
    if (row > nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q4_K * x = (const block_q4_K *)vx + ib0;

    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid =
        item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15
    const int ix =
        item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1

    const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4

    const int il = tid/step;      // 0...3
    const int ir = tid - step*il; // 0...7 or 0...3
    const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4

    const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

#if K_QUANTS_PER_ITERATION == 2
    uint32_t q32[4];
    const uint8_t * q4 = (const uint8_t *)q32;
#else
    uint16_t q16[4];
    const uint8_t * q4 = (const uint8_t *)q16;
#endif

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float * y1 = yy + i*QK_K + y_offset;
        const float * y2 = y1 + 128;

        const float dall = x[i].dm[0];
        const float dmin = x[i].dm[1];

        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

#if K_QUANTS_PER_ITERATION == 2
        const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
        const uint32_t * q2 = q1 + 16;

        q32[0] = q1[0] & 0x0f0f0f0f;
        q32[1] = q1[0] & 0xf0f0f0f0;
        q32[2] = q2[0] & 0x0f0f0f0f;
        q32[3] = q2[0] & 0xf0f0f0f0;

        sycl::float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 4; ++l) {
            s.x() += y1[l] * q4[l + 0]; s.y() += y1[l + 32] * q4[l + 4];
            s.z() += y2[l] * q4[l + 8]; s.w() += y2[l + 32] * q4[l + 12];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x() * sc[0] + s.y() * sc[1] * 1.f / 16.f +
                       s.z() * sc[4] + s.w() * sc[5] * 1.f / 16.f) -
               dmin * smin;
#else
        const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
        const uint16_t * q2 = q1 + 32;

        q16[0] = q1[0] & 0x0f0f;
        q16[1] = q1[0] & 0xf0f0;
        q16[2] = q2[0] & 0x0f0f;
        q16[3] = q2[0] & 0xf0f0;

        sycl::float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 2; ++l) {
            s.x() += y1[l] * q4[l+0]; s.y() += y1[l+32] * q4[l+2];
            s.z() += y2[l] * q4[l+4]; s.w() += y2[l+32] * q4[l+6];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x() * sc[0] + s.y() * sc[1] * 1.f/16.f + s.z() * sc[4] + s.w() * sc[5] * 1.f/16.f) - dmin * smin;
#endif

    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (tid == 0) {
        dst[row] = tmp;
    }
}
  1254. /*
  1255. DPCT1110:7: The total declared local variable size in device function
  1256. dequantize_mul_mat_vec_q5_k exceeds 128 bytes and may cause high register
  1257. pressure. Consult with your hardware vendor to find the total register size
  1258. available and adjust the code, or use smaller sub-group size to avoid high
  1259. register pressure.
  1260. */
static void dequantize_mul_mat_vec_q5_k(const void *__restrict__ vx,
                                        const float *__restrict__ yy,
                                        float *__restrict__ dst,
                                        const int ncols,
                                        const sycl::nd_item<3> &item_ct1) {

    const int row = item_ct1.get_group(2);
    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q5_K * x = (const block_q5_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid = item_ct1.get_local_id(2) / 2; // 0...15
    const int ix  = item_ct1.get_local_id(2) % 2;

    const int il = tid/4;      // 0...3
    const int ir = tid - 4*il; // 0...3
    const int n  = 2;

    const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    const uint8_t hm1 = 1 << (2*im);
    const uint8_t hm2 = hm1 << 4;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

    uint16_t q16[8];
    const uint8_t * q4 = (const uint8_t *)q16;

    for (int i = ix; i < num_blocks_per_row; i += 2) {

        const uint8_t * ql1 = x[i].qs + q_offset;
        const uint8_t * qh  = x[i].qh + l0;
        const float   * y1  = yy + i*QK_K + y_offset;
        const float   * y2  = y1 + 128;

        const float dall = x[i].dm[0];
        const float dmin = x[i].dm[1];

        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

        sycl::float4 sum = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;

        const uint16_t * q1 = (const uint16_t *)ql1;
        const uint16_t * q2 = q1 + 32;
        q16[0] = q1[0] & 0x0f0f;
        q16[1] = q1[8] & 0x0f0f;
        q16[2] = (q1[0] >> 4) & 0x0f0f;
        q16[3] = (q1[8] >> 4) & 0x0f0f;
        q16[4] = q2[0] & 0x0f0f;
        q16[5] = q2[8] & 0x0f0f;
        q16[6] = (q2[0] >> 4) & 0x0f0f;
        q16[7] = (q2[8] >> 4) & 0x0f0f;

        for (int l = 0; l < n; ++l) {
            sum.x() +=
                y1[l +  0] * (q4[l +  0] + (qh[l +  0] & (hm1 << 0) ? 16 : 0)) +
                y1[l + 16] * (q4[l +  2] + (qh[l + 16] & (hm1 << 0) ? 16 : 0));
            sum.y() +=
                y1[l + 32] * (q4[l +  4] + (qh[l +  0] & (hm1 << 1) ? 16 : 0)) +
                y1[l + 48] * (q4[l +  6] + (qh[l + 16] & (hm1 << 1) ? 16 : 0));
            sum.z() +=
                y2[l +  0] * (q4[l +  8] + (qh[l +  0] & (hm2 << 0) ? 16 : 0)) +
                y2[l + 16] * (q4[l + 10] + (qh[l + 16] & (hm2 << 0) ? 16 : 0));
            sum.w() +=
                y2[l + 32] * (q4[l + 12] + (qh[l +  0] & (hm2 << 1) ? 16 : 0)) +
                y2[l + 48] * (q4[l + 14] + (qh[l + 16] & (hm2 << 1) ? 16 : 0));
            smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
                  + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
        }
        tmp += dall * (sum.x() * sc[0] + sum.y() * sc[1] + sum.z() * sc[4] +
                       sum.w() * sc[5]) -
               dmin * smin;
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}
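// Q6_K stores each value as a low nibble (ql) plus 2 high bits (qh); the
// unsigned 6-bit result is re-centered by subtracting 32. Worked example:
// ql = 0x7 with high bits 0b10 gives (0x7 | (0b10 << 4)) - 32 = 39 - 32 = 7.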
static void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows,
                                        const sycl::nd_item<3> &item_ct1) {

    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);
    if (row >= nrows) { // guard against the padded rows of the last work-group
        return;
    }

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q6_K * x = (const block_q6_K *)vx + ib0;

    const int tid =
        item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const int ix =
        item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8

    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0...15 or 0...7

#if K_QUANTS_PER_ITERATION == 1
    const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
    const int is = 0;
#else
    const int l0 = 4 * in; // 0, 4, 8, ..., 28
    const int is = in / 4;
#endif

    const int ql_offset = 64*im + l0;
    const int qh_offset = 32*im + l0;
    const int s_offset  =  8*im + is;
    const int y_offset  = 128*im + l0;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + y_offset;
        const uint8_t * ql = x[i].ql + ql_offset;
        const uint8_t * qh = x[i].qh + qh_offset;
        const int8_t  * s  = x[i].scales + s_offset;

        const float d = x[i].d;

#if K_QUANTS_PER_ITERATION == 1
        float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
                  + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
                  + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
                  + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
                  + y[64] * s[4] * d * ((int8_t)((ql[ 0] >>  4) | ((qh[ 0] & 0x30) >> 0)) - 32)
                  + y[80] * s[5] * d * ((int8_t)((ql[16] >>  4) | ((qh[16] & 0x30) >> 0)) - 32)
                  + y[96] * s[6] * d * ((int8_t)((ql[32] >>  4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
                  +y[112] * s[7] * d * ((int8_t)((ql[48] >>  4) | ((qh[16] & 0xc0) >> 2)) - 32);
        tmp += sum;
#else
        float sum = 0;
        for (int l = 0; l < 4; ++l) {
            sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
                 + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
                 + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32)
                 + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32);
        }
        tmp += sum;
#endif

    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (tid == 0) {
        dst[row] = tmp;
    }
}
static void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const sycl::half *x = (const sycl::half *)vx;

    // automatic half -> float type cast if dfloat == float
    v.x() = x[ib + iqs + 0];
    v.y() = x[ib + iqs + 1];
}

static void convert_f32(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const float * x = (const float *) vx;

    // automatic float -> half type cast if dfloat == sycl::half
    v.x() = x[ib + iqs + 0];
    v.y() = x[ib + iqs + 1];
}
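// quantize_q8_1 (below) maps each 32-value block to int8 with a shared scale
// d = amax / 127, q_i = round(x_i / d). Rough numbers for illustration:
// amax = 2.54 gives d = 0.02, so x_i = 1.27 quantizes to q_i = 64. The block
// additionally stores the pre-quantization sum of its inputs, which the *_1
// dot-product kernels use to fold in their per-block minimums.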
static void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded,
                          const sycl::nd_item<3> &item_ct1) {
    const int ix = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                   item_ct1.get_local_id(2);

    if (ix >= kx_padded) {
        return;
    }

    const int iy = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                   item_ct1.get_local_id(1);

    const int i_padded = iy*kx_padded + ix;

    block_q8_1 * y = (block_q8_1 *) vy;

    const int ib = i_padded / QK8_1;  // block index
    const int iqs = i_padded % QK8_1; // quant index

    const float xi = ix < kx ? x[iy*kx + ix] : 0.0f;
    float amax = sycl::fabs((float)xi);
    float sum = xi;

#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        amax = sycl::fmax(amax, dpct::permute_sub_group_by_xor(
                                    item_ct1.get_sub_group(), amax, mask));
        sum +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), sum, mask);
    }

    const float  d = amax / 127;
    const int8_t q = amax == 0.0f ? 0 : sycl::round(xi / d);

    y[ib].qs[iqs] = q;

    if (iqs > 0) {
        return;
    }

    reinterpret_cast<sycl::half &>(y[ib].ds.x()) = d;
    reinterpret_cast<sycl::half &>(y[ib].ds.y()) = sum;
}
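// k_get_rows gathers whole rows of a quantized src0 according to the int32
// indices in src1, dequantizing on the fly. dequantize_kernel produces two
// values per call (for 4-bit types: one low and one high nibble), which is
// why i00 below advances in steps of 2.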
template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void k_get_rows(
            const void * src0, const int32_t * src1, dst_t * dst,
            int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
            /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
            /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
            /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
            size_t s10, size_t s11, size_t s12,
            const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                     item_ct1.get_local_id(2)) *
                    2;
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) /
                    ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) %
                    ne12;

    if (i00 >= ne00) {
        return;
    }

    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
    const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03;

    const int ib = i00/qk;           // block index
    const int iqs = (i00%qk)/qr;     // quant index
    const int iybs = i00 - i00%qk;   // dst block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel(src0_row, ib, iqs, v);

    dst_row[iybs + iqs + 0]        = v.x();
    dst_row[iybs + iqs + y_offset] = v.y();
}

template<typename src0_t, typename dst_t>
static void k_get_rows_float(
            const src0_t * src0, const int32_t * src1, dst_t * dst,
            int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
            /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
            /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
            /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
            size_t s10, size_t s11, size_t s12,
            const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                    item_ct1.get_local_id(2);
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) /
                    ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) %
                    ne12;

    if (i00 >= ne00) {
        return;
    }

    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
    const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);

    dst_row[i00] = src0_row[i00];
}
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k,
                             const sycl::nd_item<3> &item_ct1) {
    const int i = 2 * (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                       item_ct1.get_local_id(2));

    if (i >= k) {
        return;
    }

    const int ib = i/qk;           // block index
    const int iqs = (i%qk)/qr;     // quant index
    const int iybs = i - i%qk;     // y block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel(vx, ib, iqs, v);

    y[iybs + iqs + 0]        = v.x();
    y[iybs + iqs + y_offset] = v.y();
}

template <typename src_t, typename dst_t>
static void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int k,
                          const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }

    const src_t * x = (src_t *) vx;

    y[i] = x[i];
}
// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q

#define VDR_Q4_0_Q8_1_MMVQ 2
#define VDR_Q4_0_Q8_1_MMQ  4

template <int vdr>
static __dpct_inline__ float vec_dot_q4_0_q8_1_impl(const int *v, const int *u,
                                                    const float &d4,
                                                    const sycl::half2 &ds8) {
    int sumi = 0;
#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
        const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;

        // SIMD dot product of quantized values
        sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
        sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
    }

    const sycl::float2 ds8f =
        ds8.convert<float, sycl::rounding_mode::automatic>();

    // second part effectively subtracts 8 from each quant value
    return d4 * (sumi * ds8f.x() - (8 * vdr / QI4_0) * ds8f.y());
}
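// The tail above folds in the -8 offset of q4_0: each nibble stores q + 8, so
//   dot ~= d4 * (d8 * sumi - 8 * d8 * sum(u))
// and d8 * sum(u) is approximated by ds8.y(), the stored float sum of the
// 32-value q8_1 block. One call covers 8*vdr of those values, hence the
// (8 * vdr / QI4_0) share, given QI4_0 == 4.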
#define VDR_Q4_1_Q8_1_MMVQ 2
#define VDR_Q4_1_Q8_1_MMQ  4

template <int vdr>
static __dpct_inline__ float vec_dot_q4_1_q8_1_impl(const int *v, const int *u,
                                                    const sycl::half2 &dm4,
                                                    const sycl::half2 &ds8) {
    int sumi = 0;
#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
        const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;

        // SIMD dot product of quantized values
        sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
        sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
    }

#ifdef GGML_SYCL_F16
    const sycl::float2 tmp =
        (dm4 * ds8).convert<float, sycl::rounding_mode::automatic>();
    const float d4d8 = tmp.x();
    const float m4s8 = tmp.y();
#else
    const sycl::float2 dm4f =
        dm4.convert<float, sycl::rounding_mode::automatic>();
    const sycl::float2 ds8f =
        ds8.convert<float, sycl::rounding_mode::automatic>();
    const float d4d8 = dm4f.x() * ds8f.x();
    const float m4s8 = dm4f.y() * ds8f.y();
#endif // GGML_SYCL_F16

    // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
    return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
}
#define VDR_Q5_0_Q8_1_MMVQ 2
#define VDR_Q5_0_Q8_1_MMQ  4

template <int vdr>
static __dpct_inline__ float
vec_dot_q5_0_q8_1_impl(const int *vl, const int *vh, const int *u,
                       const float &d5, const sycl::half2 &ds8) {
    int sumi = 0;
#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
        vi0    |= (vh[i] <<  4) & 0x00000010; // 0 ->  4
        vi0    |= (vh[i] << 11) & 0x00001000; // 1 -> 12
        vi0    |= (vh[i] << 18) & 0x00100000; // 2 -> 20
        vi0    |= (vh[i] << 25) & 0x10000000; // 3 -> 28
        sumi = dpct::dp4a(vi0, u[2 * i + 0],
                          sumi); // SIMD dot product of quantized values

        int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
        vi1    |= (vh[i] >> 12) & 0x00000010; // 16 ->  4
        vi1    |= (vh[i] >>  5) & 0x00001000; // 17 -> 12
        vi1    |= (vh[i] <<  2) & 0x00100000; // 18 -> 20
        vi1    |= (vh[i] <<  9) & 0x10000000; // 19 -> 28
        sumi = dpct::dp4a(vi1, u[2 * i + 1],
                          sumi); // SIMD dot product of quantized values
    }

    const sycl::float2 ds8f =
        ds8.convert<float, sycl::rounding_mode::automatic>();

    // second part effectively subtracts 16 from each quant value
    return d5 * (sumi * ds8f.x() - (16 * vdr / QI5_0) * ds8f.y());
}
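// High-bit scatter used above, shown for vi0: bit b of vh supplies the 5th
// bit of byte b, so it must land at bit position 8*b + 4:
//   bit 0 -> bit 4  (<< 4),   bit 1 -> bit 12 (<< 11),
//   bit 2 -> bit 20 (<< 18),  bit 3 -> bit 28 (<< 25).
// The masks 0x00000010, 0x00001000, ... keep exactly that single bit per byte.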
#define VDR_Q5_1_Q8_1_MMVQ 2
#define VDR_Q5_1_Q8_1_MMQ  4

template <int vdr>
static __dpct_inline__ float
vec_dot_q5_1_q8_1_impl(const int *vl, const int *vh, const int *u,
                       const sycl::half2 &dm5, const sycl::half2 &ds8) {
    int sumi = 0;
#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
        vi0    |= (vh[i] <<  4) & 0x00000010; // 0 ->  4
        vi0    |= (vh[i] << 11) & 0x00001000; // 1 -> 12
        vi0    |= (vh[i] << 18) & 0x00100000; // 2 -> 20
        vi0    |= (vh[i] << 25) & 0x10000000; // 3 -> 28
        sumi = dpct::dp4a(vi0, u[2 * i + 0],
                          sumi); // SIMD dot product of quantized values

        int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
        vi1    |= (vh[i] >> 12) & 0x00000010; // 16 ->  4
        vi1    |= (vh[i] >>  5) & 0x00001000; // 17 -> 12
        vi1    |= (vh[i] <<  2) & 0x00100000; // 18 -> 20
        vi1    |= (vh[i] <<  9) & 0x10000000; // 19 -> 28
        sumi = dpct::dp4a(vi1, u[2 * i + 1],
                          sumi); // SIMD dot product of quantized values
    }

#ifdef GGML_SYCL_F16
    const sycl::float2 tmp =
        (dm5 * ds8).convert<float, sycl::rounding_mode::automatic>();
    const float d5d8 = tmp.x();
    const float m5s8 = tmp.y();
#else
    const sycl::float2 dm5f =
        dm5.convert<float, sycl::rounding_mode::automatic>();
    const sycl::float2 ds8f =
        ds8.convert<float, sycl::rounding_mode::automatic>();
    const float d5d8 = dm5f.x() * ds8f.x();
    const float m5s8 = dm5f.y() * ds8f.y();
#endif // GGML_SYCL_F16

    // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
    return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
}
#define VDR_Q8_0_Q8_1_MMVQ 2
#define VDR_Q8_0_Q8_1_MMQ  8

template <int vdr>
static __dpct_inline__ float vec_dot_q8_0_q8_1_impl(const int *v, const int *u,
                                                    const float &d8_0,
                                                    const float &d8_1) {
    int sumi = 0;
#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        // SIMD dot product of quantized values
        sumi = dpct::dp4a(v[i], u[i], sumi);
    }

    return d8_0*d8_1 * sumi;
}

template <int vdr>
static __dpct_inline__ float vec_dot_q8_1_q8_1_impl(const int *v, const int *u,
                                                    const sycl::half2 &dm8,
                                                    const sycl::half2 &ds8) {
    int sumi = 0;
#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        // SIMD dot product of quantized values
        sumi = dpct::dp4a(v[i], u[i], sumi);
    }

#ifdef GGML_SYCL_F16
    const sycl::float2 tmp =
        (dm8 * ds8).convert<float, sycl::rounding_mode::automatic>();
    const float d8d8 = tmp.x();
    const float m8s8 = tmp.y();
#else
    const sycl::float2 dm8f =
        dm8.convert<float, sycl::rounding_mode::automatic>();
    const sycl::float2 ds8f =
        ds8.convert<float, sycl::rounding_mode::automatic>();
    const float d8d8 = dm8f.x() * ds8f.x();
    const float m8s8 = dm8f.y() * ds8f.y();
#endif // GGML_SYCL_F16

    // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it
    return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
}
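// The m8s8 term above mirrors the q4_1/q5_1 handling: a stored minimum m
// contributes m * sum(y) to the exact dot product. Each call only covers vdr
// of the QI8_1 ints of a q8_1 block, so it adds ds8.y() / (QI8_1 / vdr);
// summed over the QI8_1 / vdr calls that cover one block, the full m * sum(y)
// is accounted for exactly once.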
#define VDR_Q2_K_Q8_1_MMVQ 1
#define VDR_Q2_K_Q8_1_MMQ  2

// contiguous v/x values
static __dpct_inline__ float vec_dot_q2_K_q8_1_impl_mmvq(
    const int &v, const int *__restrict__ u, const uint8_t *__restrict__ scales,
    const sycl::half2 &dm2, const float *__restrict__ d8) {

    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR2_K; ++i) {
        const int sc = scales[2*i];

        const int vi = (v >> (2*i)) & 0x03030303;

        sumf_d +=
            d8[i] * (dpct::dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product

        // fill int with 4x m
        int m = sc >> 4;
        m |= m <<  8;
        m |= m << 16;
        sumf_m += d8[i] *
                  dpct::dp4a(
                      m, u[i],
                      0); // multiply constant q2_K part with sum of q8_1 values
    }

    const sycl::float2 dm2f =
        dm2.convert<float, sycl::rounding_mode::automatic>();

    return dm2f.x() * sumf_d - dm2f.y() * sumf_m;
}

// contiguous u/y values
static __dpct_inline__ float
vec_dot_q2_K_q8_1_impl_mmq(const int *__restrict__ v, const int *__restrict__ u,
                           const uint8_t *__restrict__ scales,
                           const sycl::half2 &dm2, const float &d8) {

    int sumi_d = 0;
    int sumi_m = 0;

#pragma unroll
    for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) {
        int sumi_d_sc = 0;

        const int sc = scales[i0 / (QI8_1/2)];

        // fill int with 4x m
        int m = sc >> 4;
        m |= m <<  8;
        m |= m << 16;

#pragma unroll
        for (int i = i0; i < i0 + QI8_1/2; ++i) {
            sumi_d_sc = dpct::dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product
            sumi_m = dpct::dp4a(m, u[i],
                                sumi_m); // multiply sum of q8_1 values with m
        }

        sumi_d += sumi_d_sc * (sc & 0xF);
    }

    const sycl::float2 dm2f =
        dm2.convert<float, sycl::rounding_mode::automatic>();

    return d8 * (dm2f.x() * sumi_d - dm2f.y() * sumi_m);
}
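// q2_K scale bytes pack two fields: the low nibble is the scale for the
// 2-bit quants and the high nibble is the block minimum. The idiom
//   m |= m << 8; m |= m << 16;
// broadcasts the minimum into all four bytes of an int, so a single
// dp4a(m, u[i], 0) evaluates m * (u0 + u1 + u2 + u3) in one instruction.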
#define VDR_Q3_K_Q8_1_MMVQ 1
#define VDR_Q3_K_Q8_1_MMQ  2

// contiguous v/x values
static __dpct_inline__ float vec_dot_q3_K_q8_1_impl_mmvq(
    const int &vl, const int &vh, const int *__restrict__ u,
    const uint8_t *__restrict__ scales, const int &scale_offset,
    const float &d3, const float *__restrict__ d8) {

    float sumf = 0.0f;

#pragma unroll
    for (int i = 0; i < QR3_K; ++i) {
        const int isc = scale_offset + 2*i;

        const int isc_low = isc % (QK_K/32);
        const int sc_shift_low = 4 * (isc / (QK_K/32));
        const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF;

        const int isc_high = isc % (QK_K/64);
        const int sc_shift_high = 2 * (isc / (QK_K/64));
        const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;

        const int sc = (sc_low | sc_high) - 32;

        const int vil = (vl >> (2*i)) & 0x03030303;

        const int vih = ((vh >> i) << 2) & 0x04040404;

        const int vi =
            dpct::vectorized_binary<sycl::char4>(vil, vih, dpct::sub_sat());

        sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
    }

    return d3 * sumf;
}

// contiguous u/y values
static __dpct_inline__ float
vec_dot_q3_K_q8_1_impl_mmq(const int *__restrict__ v, const int *__restrict__ u,
                           const int8_t *__restrict__ scales, const float &d3,
                           const float &d8) {

    int sumi = 0;

#pragma unroll
    for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) {
        int sumi_sc = 0;

        for (int i = i0; i < i0 + QI8_1/2; ++i) {
            sumi_sc = dpct::dp4a(v[i], u[i], sumi_sc); // SIMD dot product
        }

        sumi += sumi_sc * scales[i0 / (QI8_1/2)];
    }

    return d3*d8 * sumi;
}
#define VDR_Q4_K_Q8_1_MMVQ 2
#define VDR_Q4_K_Q8_1_MMQ  8

// contiguous v/x values
static __dpct_inline__ float vec_dot_q4_K_q8_1_impl_vmmq(
    const int *__restrict__ v, const int *__restrict__ u,
    const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
    const sycl::half2 &dm4, const float *__restrict__ d8) {

    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR4_K; ++i) {
        const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F;
        const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F;

        const int dot1 =
            dpct::dp4a(v1i, u[2 * i + 1],
                       dpct::dp4a(v0i, u[2 * i + 0], 0)); // SIMD dot product
        const int dot2 =
            dpct::dp4a(0x01010101, u[2 * i + 1],
                       dpct::dp4a(0x01010101, u[2 * i + 0], 0)); // sum of u

        sumf_d += d8[i] * (dot1 * sc[i]);
        sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values
    }

    const sycl::float2 dm4f =
        dm4.convert<float, sycl::rounding_mode::automatic>();

    return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
}

// contiguous u/y values
static __dpct_inline__ float vec_dot_q4_K_q8_1_impl_mmq(
    const int *__restrict__ v, const int *__restrict__ u,
    const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
    const sycl::half2 &dm4, const sycl::half2 *__restrict__ ds8) {

    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) {
        int sumi_d = 0;

#pragma unroll
        for (int j = 0; j < QI8_1; ++j) {
            sumi_d = dpct::dp4a((v[j] >> (4 * i)) & 0x0F0F0F0F,
                                u[i * QI8_1 + j], sumi_d); // SIMD dot product
        }

        const sycl::float2 ds8f =
            ds8[i].convert<float, sycl::rounding_mode::automatic>();

        sumf_d += ds8f.x() * (sc[i] * sumi_d);
        sumf_m += ds8f.y() * m[i]; // sum of q8_1 block * q4_K min val
    }

    const sycl::float2 dm4f =
        dm4.convert<float, sycl::rounding_mode::automatic>();

    return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
}
#define VDR_Q5_K_Q8_1_MMVQ 2
#define VDR_Q5_K_Q8_1_MMQ  8

// contiguous v/x values
static __dpct_inline__ float vec_dot_q5_K_q8_1_impl_vmmq(
    const int *__restrict__ vl, const int *__restrict__ vh,
    const int *__restrict__ u, const uint8_t *__restrict__ sc,
    const uint8_t *__restrict__ m, const sycl::half2 &dm5,
    const float *__restrict__ d8) {

    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR5_K; ++i) {
        const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
        const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;

        const int vh0i = ((vh[0] >> i) << 4) & 0x10101010;
        const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;

        const int v0i = vl0i | vh0i;
        const int v1i = vl1i | vh1i;

        const int dot1 =
            dpct::dp4a(v0i, u[2 * i + 0],
                       dpct::dp4a(v1i, u[2 * i + 1], 0)); // SIMD dot product
        const int dot2 =
            dpct::dp4a(0x01010101, u[2 * i + 0],
                       dpct::dp4a(0x01010101, u[2 * i + 1], 0)); // sum of u

        sumf_d += d8[i] * (dot1 * sc[i]);
        sumf_m += d8[i] * (dot2 * m[i]);
    }

    const sycl::float2 dm5f =
        dm5.convert<float, sycl::rounding_mode::automatic>();

    return dm5f.x() * sumf_d - dm5f.y() * sumf_m;
}

// contiguous u/y values
static __dpct_inline__ float vec_dot_q5_K_q8_1_impl_mmq(
    const int *__restrict__ v, const int *__restrict__ u,
    const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
    const sycl::half2 &dm4, const sycl::half2 *__restrict__ ds8) {

    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) {
        int sumi_d = 0;

#pragma unroll
        for (int j = 0; j < QI8_1; ++j) {
            sumi_d = dpct::dp4a(v[i * QI8_1 + j], u[i * QI8_1 + j],
                                sumi_d); // SIMD dot product
        }

        const sycl::float2 ds8f =
            ds8[i].convert<float, sycl::rounding_mode::automatic>();

        sumf_d += ds8f.x() * (sc[i] * sumi_d);
        sumf_m += ds8f.y() * m[i]; // sum of q8_1 block * q4_K min val
    }

    const sycl::float2 dm4f =
        dm4.convert<float, sycl::rounding_mode::automatic>();

    return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
}
#define VDR_Q6_K_Q8_1_MMVQ 1
#define VDR_Q6_K_Q8_1_MMQ  8

// contiguous v/x values
static __dpct_inline__ float
vec_dot_q6_K_q8_1_impl_mmvq(const int &vl, const int &vh,
                            const int *__restrict__ u,
                            const int8_t *__restrict__ scales, const float &d,
                            const float *__restrict__ d8) {

    float sumf = 0.0f;

#pragma unroll
    for (int i = 0; i < QR6_K; ++i) {
        const int sc = scales[4*i];

        const int vil = (vl >> (4*i)) & 0x0F0F0F0F;

        const int vih = ((vh >> (4*i)) << 4) & 0x30303030;

        const int vi = dpct::vectorized_binary<sycl::char4>(
            (vil | vih), 0x20202020, dpct::sub_sat()); // vi = (vil | vih) - 32

        sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
    }

    return d*sumf;
}
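// dpct::vectorized_binary<sycl::char4>(a, b, dpct::sub_sat()) is a saturated
// byte-wise subtraction, so subtracting 0x20202020 above removes the +32
// bias from all four packed 6-bit values at once.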
// contiguous u/y values
static __dpct_inline__ float
vec_dot_q6_K_q8_1_impl_mmq(const int *__restrict__ v, const int *__restrict__ u,
                           const int8_t *__restrict__ sc, const float &d6,
                           const float *__restrict__ d8) {

    float sumf_d = 0.0f;

#pragma unroll
    for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) {
        sycl::int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale

#pragma unroll
        for (int i = i0; i < i0 + 2; ++i) {
            sumi_d.x() = dpct::dp4a(v[2 * i + 0], u[2 * i + 0],
                                    sumi_d.x()); // SIMD dot product
            sumi_d.x() = dpct::dp4a(v[2 * i + 1], u[2 * i + 1],
                                    sumi_d.x()); // SIMD dot product

            sumi_d.y() = dpct::dp4a(v[2 * i + 4], u[2 * i + 4],
                                    sumi_d.y()); // SIMD dot product
            sumi_d.y() = dpct::dp4a(v[2 * i + 5], u[2 * i + 5],
                                    sumi_d.y()); // SIMD dot product
        }

        sumf_d += d8[i0 / 4] *
                  (sc[i0 / 2 + 0] * sumi_d.x() + sc[i0 / 2 + 1] * sumi_d.y());
    }

    return d6 * sumf_d;
}
static __dpct_inline__ float
vec_dot_q4_0_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;

    int v[VDR_Q4_0_Q8_1_MMVQ];
    int u[2*VDR_Q4_0_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) {
        v[i]     = get_int_from_uint8(bq4_0->qs, iqs + i);
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0);
    }

    return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q4_0(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_qs_q4_0, float *tile_x_d_q4_0) {
    (void)x_qh; (void)x_sc;

    *x_ql = tile_x_qs_q4_0;
    *x_dm = (sycl::half2 *)tile_x_d_q4_0;
}
template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q4_0(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh; (void)x_sc;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI4_0;
    const int kqsx = k % QI4_0;

    const block_q4_0 * bx0 = (const block_q4_0 *) vx;

    float * x_dmf = (float *) x_dm;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
        // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
    const int kbxd = k % blocks_per_tile_x_row;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
        int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d;
    }
}
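// Note the row stride of WARP_SIZE + 1 (instead of WARP_SIZE) used for x_ql
// throughout these tile loaders: the one-element padding staggers rows
// across local-memory banks, which avoids bank conflicts when a sub-group
// reads a column of the tile.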
static __dpct_inline__ float vec_dot_q4_0_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh; (void)x_sc;

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
    const float * x_dmf = (const float *) x_dm;

    int u[2*VDR_Q4_0_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)          % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0)  % WARP_SIZE];
    }

    return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
        (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
         y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
static __dpct_inline__ float
vec_dot_q4_1_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;

    int v[VDR_Q4_1_Q8_1_MMVQ];
    int u[2*VDR_Q4_1_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) {
        v[i]     = get_int_from_uint8_aligned(bq4_1->qs, iqs + i);
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1);
    }

    return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q4_1(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_qs_q4_1, sycl::half2 *tile_x_dm_q4_1) {
    (void)x_qh; (void)x_sc;

    *x_ql = tile_x_qs_q4_1;
    *x_dm = tile_x_dm_q4_1;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q4_1(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh; (void)x_sc;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI4_1;
    const int kqsx = k % QI4_1;

    const block_q4_1 * bx0 = (const block_q4_1 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
    const int kbxd = k % blocks_per_tile_x_row;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
        int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
    }
}
static __dpct_inline__ float vec_dot_q4_1_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh; (void)x_sc;

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));

    int u[2*VDR_Q4_1_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)          % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1)  % WARP_SIZE];
    }

    return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
        (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
         y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}

static __dpct_inline__ float
vec_dot_q5_0_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;

    int vl[VDR_Q5_0_Q8_1_MMVQ];
    int vh[VDR_Q5_0_Q8_1_MMVQ];
    int  u[2*VDR_Q5_0_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) {
        vl[i]    = get_int_from_uint8(bq5_0->qs, iqs + i);
        vh[i]    = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i));
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0);
    }

    return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds);
}
template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q5_0(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql_q5_0, float *tile_x_d_q5_0) {
    (void)x_qh; (void)x_sc;

    *x_ql = tile_x_ql_q5_0;
    *x_dm = (sycl::half2 *)tile_x_d_q5_0;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q5_0(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh; (void)x_sc;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI5_0;
    const int kqsx = k % QI5_0;

    const block_q5_0 * bx0 = (const block_q5_0 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;

        const int ql = get_int_from_uint8(bxi->qs, kqsx);
        const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));

        int qs0 = (ql >>  0)   & 0x0F0F0F0F;
        qs0    |= (qh <<  4)   & 0x00000010; // 0 ->  4
        qs0    |= (qh << 11)   & 0x00001000; // 1 -> 12
        qs0    |= (qh << 18)   & 0x00100000; // 2 -> 20
        qs0    |= (qh << 25)   & 0x10000000; // 3 -> 28
        qs0 = dpct::vectorized_binary<sycl::char4>(
            qs0, 0x10101010, dpct::sub_sat()); // subtract 16

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;

        int qs1 = (ql >>  4)   & 0x0F0F0F0F;
        qs1    |= (qh >> 12)   & 0x00000010; // 16 ->  4
        qs1    |= (qh >>  5)   & 0x00001000; // 17 -> 12
        qs1    |= (qh <<  2)   & 0x00100000; // 18 -> 20
        qs1    |= (qh <<  9)   & 0x10000000; // 19 -> 28
        qs1 = dpct::vectorized_binary<sycl::char4>(
            qs1, 0x10101010, dpct::sub_sat()); // subtract 16

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
    const int kbxd = k % blocks_per_tile_x_row;
    float * x_dmf = (float *) x_dm;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
        int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d;
    }
}
static __dpct_inline__ float vec_dot_q5_0_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh; (void)x_sc;

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
    const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
    const float * x_dmf = (const float *) x_dm;
    const float * y_df  = (const float *) y_ds;

    int u[2*VDR_Q5_0_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)          % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0)  % WARP_SIZE];
    }

    return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
        (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
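// q5_0 tiles can reuse vec_dot_q8_0_q8_1_impl because load_tiles_q5_0 already
// merged the high bits into the nibbles and subtracted 16 from every byte:
// the tile holds plain signed 8-bit values, exactly what the q8_0 path
// expects.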
static __dpct_inline__ float
vec_dot_q5_1_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;

    int vl[VDR_Q5_1_Q8_1_MMVQ];
    int vh[VDR_Q5_1_Q8_1_MMVQ];
    int  u[2*VDR_Q5_1_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) {
        vl[i]    = get_int_from_uint8_aligned(bq5_1->qs, iqs + i);
        vh[i]    = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i));
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1);
    }

    return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q5_1(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql_q5_1, sycl::half2 *tile_x_dm_q5_1) {
    (void)x_qh; (void)x_sc;

    *x_ql = tile_x_ql_q5_1;
    *x_dm = tile_x_dm_q5_1;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q5_1(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh; (void)x_sc;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI5_1;
    const int kqsx = k % QI5_1;

    const block_q5_1 * bx0 = (const block_q5_1 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;

        const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
        const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));

        int qs0 = (ql >>  0) & 0x0F0F0F0F;
        qs0    |= (qh <<  4) & 0x00000010; // 0 ->  4
        qs0    |= (qh << 11) & 0x00001000; // 1 -> 12
        qs0    |= (qh << 18) & 0x00100000; // 2 -> 20
        qs0    |= (qh << 25) & 0x10000000; // 3 -> 28

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;

        int qs1 = (ql >>  4) & 0x0F0F0F0F;
        qs1    |= (qh >> 12) & 0x00000010; // 16 ->  4
        qs1    |= (qh >>  5) & 0x00001000; // 17 -> 12
        qs1    |= (qh <<  2) & 0x00100000; // 18 -> 20
        qs1    |= (qh <<  9) & 0x10000000; // 19 -> 28

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
    const int kbxd = k % blocks_per_tile_x_row;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
        int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
    }
}
static __dpct_inline__ float vec_dot_q5_1_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh; (void)x_sc;

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
    const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;

    int u[2*VDR_Q5_1_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)          % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1)  % WARP_SIZE];
    }

    return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
        (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
static __dpct_inline__ float
vec_dot_q8_0_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;

    int v[VDR_Q8_0_Q8_1_MMVQ];
    int u[VDR_Q8_0_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) {
        v[i] = get_int_from_int8(bq8_0->qs, iqs + i);
        u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
    }

    return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d,
                                                      bq8_1->ds[0]);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q8_0(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_qs_q8_0, float *tile_x_d_q8_0) {
    (void)x_qh; (void)x_sc;

    *x_ql = tile_x_qs_q8_0;
    *x_dm = (sycl::half2 *)tile_x_d_q8_0;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q8_0(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh; (void)x_sc;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI8_0;
    const int kqsx = k % QI8_0;
    float * x_dmf = (float *) x_dm;

    const block_q8_0 * bx0 = (const block_q8_0 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
    const int kbxd = k % blocks_per_tile_x_row;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
        int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d;
    }
}
static __dpct_inline__ float vec_dot_q8_0_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh; (void)x_sc;

    const float * x_dmf = (const float *) x_dm;
    const float * y_df  = (const float *) y_ds;

    return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>
        (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0],
         y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]);
}

static __dpct_inline__ float
vec_dot_q2_K_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q2_K * bq2_K = (const block_q2_K *) vbq;

    const int bq8_offset = QR2_K * (iqs / QI8_1);
    const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);

    const uint8_t * scales = bq2_K->scales + scale_offset;

    const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
    int    u[QR2_K];
    float d8[QR2_K];

#pragma unroll
    for (int i = 0; i < QR2_K; ++ i) {
        u[i]  = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
        d8[i] = bq8_1[bq8_offset + i].ds[0];
    }

    return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
}
template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q2_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql_q2_K, sycl::half2 *tile_x_dm_q2_K,
                    int *tile_x_sc_q2_K) {
    (void)x_qh;

    *x_ql = tile_x_ql_q2_K;
    *x_dm = tile_x_dm_q2_K;
    *x_sc = tile_x_sc_q2_K;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q2_K(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI2_K;
    const int kqsx = k % QI2_K;

    const block_q2_K * bx0 = (const block_q2_K *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
    const int kbxd = k % blocks_per_tile_x_row;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
        int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
        int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);

        x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
    }
}
static __dpct_inline__ float vec_dot_q2_K_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh;

    const int kbx = k / QI2_K;
    const int ky  = (k % QI2_K) * QR2_K;
    const float * y_df = (const float *) y_ds;

    int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];

    const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
    const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));

#pragma unroll
    for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
        v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
    }

    const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;

    const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
    return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
}
static __dpct_inline__ float
vec_dot_q3_K_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {

    const block_q3_K * bq3_K = (const block_q3_K *) vbq;

    const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
    const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);

    const float d = bq3_K->d;

    const int vl = get_int_from_uint8(bq3_K->qs, iqs);

    // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
    const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;

    int    u[QR3_K];
    float d8[QR3_K];

#pragma unroll
    for (int i = 0; i < QR3_K; ++i) {
        u[i]  = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
        d8[i] = bq8_1[bq8_offset + i].ds[0];
    }

    return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q3_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql_q3_K, sycl::half2 *tile_x_dm_q3_K,
                    int *tile_x_qh_q3_K, int *tile_x_sc_q3_K) {

    *x_ql = tile_x_ql_q3_K;
    *x_dm = tile_x_dm_q3_K;
    *x_qh = tile_x_qh_q3_K;
    *x_sc = tile_x_sc_q3_K;
}
template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q3_K(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI3_K;
    const int kqsx = k % QI3_K;

    const block_q3_K * bx0 = (const block_q3_K *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
    const int kbxd = k % blocks_per_tile_x_row;
    float * x_dmf = (float *) x_dm;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
        int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
        int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);

        // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
        x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
        int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);

        const int ksc = k % (QI3_K/4);

        const int ksc_low = ksc % (QI3_K/8);
        const int shift_low = 4 * (ksc / (QI3_K/8));
        const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;

        const int ksc_high = QI3_K/8;
        const int shift_high = 2 * ksc;
        const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;

        const int sc = dpct::vectorized_binary<sycl::char4>(
            sc_low | sc_high, 0x20202020, dpct::sub_sat());

        x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
    }
}
  2542. static __dpct_inline__ float vec_dot_q3_K_q8_1_mul_mat(
  2543. const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
  2544. const int *__restrict__ x_qh, const int *__restrict__ x_sc,
  2545. const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
  2546. const int &i, const int &j, const int &k) {
  2547. const int kbx = k / QI3_K;
  2548. const int ky = (k % QI3_K) * QR3_K;
  2549. const float * x_dmf = (const float *) x_dm;
  2550. const float * y_df = (const float *) y_ds;
  2551. const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
  2552. int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
  2553. #pragma unroll
  2554. for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
  2555. const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
  2556. const int shift = 2 * ((ky % 32) / 8);
  2557. const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
  2558. const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
  2559. const int vlh = (vh << 2) & 0x04040404;
  2560. v[l] = dpct::vectorized_binary<sycl::char4>(vll, vlh, dpct::sub_sat());
  2561. }
  2562. const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
  2563. return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
  2564. }
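
// Editor's sketch: dpct::vectorized_binary<sycl::char4>(..., dpct::sub_sat())
// above is assumed to behave like CUDA's __vsubss4: a lane-wise saturating
// subtraction of four packed int8 values. Scalar reference (assumes <cstdint>
// is already pulled in by this file's headers):
static inline int vsubss4_ref(int a, int b) {
    unsigned int r = 0;
    for (int i = 0; i < 4; ++i) {
        const int8_t ai = (int8_t)((unsigned int)a >> (8*i));
        const int8_t bi = (int8_t)((unsigned int)b >> (8*i));
        int d = (int)ai - (int)bi;
        d = d > 127 ? 127 : (d < -128 ? -128 : d); // saturate to int8
        r |= (unsigned int)(d & 0xff) << (8*i);    // repack the lane
    }
    return (int)r;
}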
static __dpct_inline__ float
vec_dot_q4_K_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_q4_K * bq4_K = (const block_q4_K *) vbq;

    int    v[2];
    int    u[2*QR4_K];
    float d8[QR4_K];

    // iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6
    const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2));

    // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12
    // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44
    // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76
    // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108

    const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
    v[0] = q4[0];
    v[1] = q4[4];

    const uint16_t * scales = (const uint16_t *)bq4_K->scales;
    uint16_t aux[2];
    const int j = bq8_offset/2;
    if (j < 2) {
        aux[0] = scales[j+0] & 0x3f3f;
        aux[1] = scales[j+2] & 0x3f3f;
    } else {
        aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
        aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
    }
    const uint8_t * sc = (const uint8_t *)aux;
    const uint8_t * m  = sc + 2;

    for (int i = 0; i < QR4_K; ++i) {
        const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
        d8[i] = bq8i->ds[0];

        const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
        u[2*i+0] = q8[0];
        u[2*i+1] = q8[4];
    }

    return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8);
}
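
// Editor's note: block_q4_K packs eight 6-bit scales and eight 6-bit mins into
// 12 bytes; the aux[] bit-twiddling above extracts two scale/min pairs at a
// time. A per-index scalar reference (sketch, consistent with how the CPU-side
// dequantization unpacks the same 12-byte layout; j in 0..7):
static inline void q4_K_get_scale_min_ref(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
    if (j < 4) {
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    } else {
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j - 0] >> 6) << 4);
    }
}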
template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q4_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql_q4_K, sycl::half2 *tile_x_dm_q4_K,
                    int *tile_x_sc_q4_K) {
    (void)x_qh;

    *x_ql = tile_x_ql_q4_K;
    *x_dm = tile_x_dm_q4_K;
    *x_sc = tile_x_sc_q4_K;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q4_K(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI4_K; // == 0 if QK_K == 256
    const int kqsx = k % QI4_K; // == k if QK_K == 256

    const block_q4_K * bx0 = (const block_q4_K *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
    const int kbxd = k % blocks_per_tile_x_row;          // == 0 if QK_K == 256

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
        int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
        int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);

        const int * scales = (const int *) bxi->scales;

        const int ksc = k % (WARP_SIZE/8);
        // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
        int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
        scales8    |= (scales[ksc/2]              >> (2 * (ksc % 2)))       & 0x30303030; // upper 2 bits

        x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
    }
}

static __dpct_inline__ float vec_dot_q4_K_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh;

    const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8);

    const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
    return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
                                      x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
}

static __dpct_inline__ float
vec_dot_q5_K_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_q5_K * bq5_K = (const block_q5_K *) vbq;

    int   vl[2];
    int   vh[2];
    int    u[2*QR5_K];
    float d8[QR5_K];

    const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
    const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
    const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));

    vl[0] = ql[0];
    vl[1] = ql[4];

    vh[0] = qh[0] >> bq8_offset;
    vh[1] = qh[4] >> bq8_offset;

    const uint16_t * scales = (const uint16_t *)bq5_K->scales;
    uint16_t aux[2];
    const int j = bq8_offset/2;
    if (j < 2) {
        aux[0] = scales[j+0] & 0x3f3f;
        aux[1] = scales[j+2] & 0x3f3f;
    } else {
        aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
        aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
    }
    const uint8_t * sc = (const uint8_t *)aux;
    const uint8_t * m  = sc + 2;

#pragma unroll
    for (int i = 0; i < QR5_K; ++i) {
        const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
        d8[i] = bq8i->ds[0];

        const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
        u[2*i+0] = q8[0];
        u[2*i+1] = q8[4];
    }

    return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q5_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql_q5_K, sycl::half2 *tile_x_dm_q5_K,
                    int *tile_x_sc_q5_K) {
    (void)x_qh;

    *x_ql = tile_x_ql_q5_K;
    *x_dm = tile_x_dm_q5_K;
    *x_sc = tile_x_sc_q5_K;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q5_K(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI5_K; // == 0 if QK_K == 256
    const int kqsx = k % QI5_K; // == k if QK_K == 256

    const block_q5_K * bx0 = (const block_q5_K *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
        const int ky = QR5_K*kqsx;

        const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
        const int ql0 = (ql >> 0) & 0x0F0F0F0F;
        const int ql1 = (ql >> 4) & 0x0F0F0F0F;

        const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
        const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
        const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;

        const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
        const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);

        x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
        x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
    const int kbxd = k % blocks_per_tile_x_row;          // == 0 if QK_K == 256

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
        int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
        int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);

        const int * scales = (const int *) bxi->scales;

        const int ksc = k % (WARP_SIZE/8);
        // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
        int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
        scales8    |= (scales[ksc/2]              >> (2 * (ksc % 2)))       & 0x30303030; // upper 2 bits

        x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
    }
}

static __dpct_inline__ float vec_dot_q5_K_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh;

    const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8);

    const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
    const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
    return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
                                      x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
}

static __dpct_inline__ float
vec_dot_q6_K_q8_1(const void *__restrict__ vbq,
                  const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_q6_K * bq6_K = (const block_q6_K *) vbq;

    const int bq8_offset   = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
    const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
    const int vh_shift     = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));

    const int vl = get_int_from_uint8(bq6_K->ql, iqs);
    const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;

    const int8_t * scales = bq6_K->scales + scale_offset;

    int   u[QR6_K];
    float d8[QR6_K];

#pragma unroll
    for (int i = 0; i < QR6_K; ++i) {
        u[i]  = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
        d8[i] = bq8_1[bq8_offset + 2 * i].ds[0];
    }

    return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8);
}

template <int mmq_y>
static __dpct_inline__ void
allocate_tiles_q6_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
                    int *tile_x_ql, sycl::half2 *tile_x_dm, int *tile_x_sc) {
    (void)x_qh;

    *x_ql = tile_x_ql;
    *x_dm = tile_x_dm;
    *x_sc = tile_x_sc;
}

template <int mmq_y, int nwarps, bool need_check>
static __dpct_inline__ void
load_tiles_q6_K(const void *__restrict__ vx, int *__restrict__ x_ql,
                sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
                int *__restrict__ x_sc, const int &i_offset, const int &i_max,
                const int &k, const int &blocks_per_row) {
    (void)x_qh;

    GGML_SYCL_ASSUME(i_offset >= 0);
    GGML_SYCL_ASSUME(i_offset < nwarps);
    GGML_SYCL_ASSUME(k >= 0);
    GGML_SYCL_ASSUME(k < WARP_SIZE);

    const int kbx  = k / QI6_K; // == 0 if QK_K == 256
    const int kqsx = k % QI6_K; // == k if QK_K == 256

    const block_q6_K * bx0 = (const block_q6_K *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
        const int ky = QR6_K*kqsx;

        const int ql = get_int_from_uint8(bxi->ql, kqsx);
        const int ql0 = (ql >> 0) & 0x0F0F0F0F;
        const int ql1 = (ql >> 4) & 0x0F0F0F0F;

        const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
        const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
        const int qh1 =  (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4))))       & 0x30303030;

        const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
        const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);

        x_ql[i * (2 * WARP_SIZE + 1) + kq0] =
            dpct::vectorized_binary<sycl::char4>(ql0 | qh0, 0x20202020,
                                                 dpct::sub_sat());
        x_ql[i * (2 * WARP_SIZE + 1) + kq1] =
            dpct::vectorized_binary<sycl::char4>(ql1 | qh1, 0x20202020,
                                                 dpct::sub_sat());
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
    const int kbxd = k % blocks_per_tile_x_row;          // == 0 if QK_K == 256
    float * x_dmf = (float *) x_dm;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
        int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d;
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
        int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;

        if (need_check) {
            i = sycl::min(i, i_max);
        }

        const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;

        x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
    }
}

static __dpct_inline__ float vec_dot_q6_K_q8_1_mul_mat(
    const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
    const int *__restrict__ x_qh, const int *__restrict__ x_sc,
    const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
    const int &i, const int &j, const int &k) {
    (void)x_qh;

    const float * x_dmf = (const float *) x_dm;
    const float * y_df  = (const float *) y_ds;

    const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]);

    const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
    const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
    return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]);
}

static __dpct_inline__ float
vec_dot_iq2_xxs_q8_1(const void *__restrict__ vbq,
                     const block_q8_1 *__restrict__ bq8_1, const int &iqs,
                     const uint64_t *iq2xxs_grid, const uint8_t *ksigns_iq2xs,
                     const uint8_t *kmask_iq2xs) {
    const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq;

#if QR2_XXS == 8
    const int ib32 = iqs;
    const uint16_t * q2 = bq2->qs + 4*ib32;
    const uint8_t  * aux8 = (const uint8_t *)q2;
    const int8_t   * q8 = bq8_1[ib32].qs;
    uint32_t aux32 = q2[2] | (q2[3] << 16);
    int sumi = 0;
    for (int l = 0; l < 4; ++l) {
        const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
        const uint8_t signs = ksigns_iq2xs[aux32 & 127];
        for (int j = 0; j < 8; ++j) {
            sumi += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
        }
        q8 += 8;
        aux32 >>= 7;
    }
    const float d = (float)bq2->d * (0.5f + aux32) * bq8_1[ib32].ds[0] * 0.25f;
    return d * sumi;
#else
    // iqs is 0...15
    const int ib32 = iqs/2;
    const int il = iqs%2;
    const uint16_t * q2 = bq2->qs + 4*ib32;
    const uint8_t  * aux8 = (const uint8_t *)q2;
    const uint8_t  * grid1 = (const uint8_t *)(iq2xxs_grid + aux8[2*il+0]);
    const uint8_t  * grid2 = (const uint8_t *)(iq2xxs_grid + aux8[2*il+1]);
    const uint32_t aux32 = q2[2] | (q2[3] << 16);
    const float d = (float)bq2->d * (0.5f + (aux32 >> 28)) * bq8_1[ib32].ds[0] * 0.25f;
    const uint8_t signs1 = ksigns_iq2xs[(aux32 >> 14*il) & 127];
    const uint8_t signs2 = ksigns_iq2xs[(aux32 >> (14*il + 7)) & 127];
    const int8_t * q8 = bq8_1[ib32].qs + 16*il;
    int sumi1 = 0, sumi2 = 0;
    for (int j = 0; j < 8; ++j) {
        sumi1 += q8[j+0] * grid1[j] * (signs1 & kmask_iq2xs[j] ? -1 : 1);
        sumi2 += q8[j+8] * grid2[j] * (signs2 & kmask_iq2xs[j] ? -1 : 1);
    }
    return d * (sumi1 + sumi2);
#endif
}
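
// Editor's note (assumption about the tables): kmask_iq2xs[j] is the single-bit
// mask 1 << j, and ksigns_iq2xs maps a 7-bit field to 8 sign bits with the 8th
// bit chosen for odd parity. Under that assumption, the sign application in the
// loops above is equivalent to this scalar reference:
static inline int iq2_apply_signs_ref(const int8_t * q8, const uint8_t * grid, uint8_t signs) {
    int sum = 0;
    for (int j = 0; j < 8; ++j) {
        sum += q8[j] * grid[j] * (((signs >> j) & 1) ? -1 : 1);
    }
    return sum;
}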
static __dpct_inline__ float
vec_dot_iq2_xs_q8_1(const void *__restrict__ vbq,
                    const block_q8_1 *__restrict__ bq8_1, const int &iqs,
                    const uint64_t *iq2xs_grid, const uint64_t *ksigns64) {
#if DPCT_COMPATIBILITY_TEMP >= \
    MIN_CC_DP4A // lowest compute capability for integer intrinsics
    const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq;

    const int ib32 = iqs;
    const uint16_t * q2 = bq2->qs + 4*ib32;
    const int8_t   * q8 = bq8_1[ib32].qs;
    const uint8_t ls1 = bq2->scales[ib32] & 0xf;
    const uint8_t ls2 = bq2->scales[ib32] >>  4;
    int sumi1 = 0;
    for (int l = 0; l < 2; ++l) {
        const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511));
        const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9));
        const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
            grid[0] ^ signs[0], signs[0], std::minus<>());
        const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
            grid[1] ^ signs[1], signs[1], std::minus<>());
        sumi1 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi1);
        sumi1 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi1);
        q8 += 8;
    }
    int sumi2 = 0;
    for (int l = 2; l < 4; ++l) {
        const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511));
        const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9));
        const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
            grid[0] ^ signs[0], signs[0], std::minus<>());
        const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
            grid[1] ^ signs[1], signs[1], std::minus<>());
        sumi2 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi2);
        sumi2 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi2);
        q8 += 8;
    }
    const float d = (float)bq2->d * bq8_1[ib32].ds[0] * 0.25f;
    return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
#else
    assert(false);
    return 0.f;
#endif
}

static __dpct_inline__ float
vec_dot_iq2_s_q8_1(const void *__restrict__ vbq,
                   const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_iq2_s * bq2 = (const block_iq2_s *) vbq;

    const int ib32 = iqs;
    const int8_t  * q8 = bq8_1[ib32].qs;
    const uint8_t * signs = bq2->qs + QK_K/8 + 4*ib32;
    const uint8_t ls1 = bq2->scales[ib32] & 0xf;
    const uint8_t ls2 = bq2->scales[ib32] >>  4;
    int sumi1 = 0;
    for (int l = 0; l < 2; ++l) {
        const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
        const uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
            ((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201,
            std::equal_to<>());
        const uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
            ((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201,
            std::equal_to<>());
        const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
            grid[0] ^ signs0, signs0, std::minus<>());
        const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
            grid[1] ^ signs1, signs1, std::minus<>());
        sumi1 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi1);
        sumi1 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi1);
        q8 += 8;
    }
    int sumi2 = 0;
    for (int l = 2; l < 4; ++l) {
        const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
        const uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
            ((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201,
            std::equal_to<>());
        const uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
            ((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201,
            std::equal_to<>());
        const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
            grid[0] ^ signs0, signs0, std::minus<>());
        const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
            grid[1] ^ signs1, signs1, std::minus<>());
        sumi2 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi2);
        sumi2 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi2);
        q8 += 8;
    }
    const float d = (float)bq2->d * bq8_1[ib32].ds[0] * 0.25f;
    return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
}
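
// Editor's note: the ((s * 0x01010101) & 0x08040201) == 0x08040201 pattern above
// broadcasts four sign bits into four byte lanes: lane j becomes 0xFF iff bit j
// of s is set. (grid ^ mask) - mask then negates exactly those bytes, since
// (x ^ 0xFF) - 0xFF == -x modulo 256. Scalar reference for the mask expansion:
static inline uint32_t expand_sign_bits_ref(uint8_t s4) {
    uint32_t mask = 0;
    for (int j = 0; j < 4; ++j) {
        if ((s4 >> j) & 1) {
            mask |= 0xffu << (8*j); // lane j gets the all-ones byte
        }
    }
    return mask;
}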
static __dpct_inline__ float
vec_dot_iq3_xxs_q8_1(const void *__restrict__ vbq,
                     const block_q8_1 *__restrict__ bq8_1, const int &iqs,
                     const uint32_t *iq3xxs_grid, const uint64_t *ksigns64) {
#if DPCT_COMPATIBILITY_TEMP >= \
    MIN_CC_DP4A // lowest compute capability for integer intrinsics
    const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq;

    const int ib32 = iqs;
    const uint8_t  * q3 = bq2->qs + 8*ib32;
    const uint16_t * gas = (const uint16_t *)(bq2->qs + QK_K/4) + 2*ib32;
    const int8_t   * q8 = bq8_1[ib32].qs;
    uint32_t aux32 = gas[0] | (gas[1] << 16);
    int sumi = 0;
    for (int l = 0; l < 4; ++l) {
        const uint32_t * grid1 = iq3xxs_grid + q3[2*l+0];
        const uint32_t * grid2 = iq3xxs_grid + q3[2*l+1];
        const uint32_t * signs = (const uint32_t *)(ksigns64 + (aux32 & 127));
        const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
            grid1[0] ^ signs[0], signs[0], std::minus<>());
        const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
            grid2[0] ^ signs[1], signs[1], std::minus<>());
        sumi = dpct::dp4a(grid_l, *((int *)q8 + 0), sumi);
        sumi = dpct::dp4a(grid_h, *((int *)q8 + 1), sumi);
        q8 += 8;
        aux32 >>= 7;
    }
    const float d = (float)bq2->d * (0.5f + aux32) * bq8_1[ib32].ds[0] * 0.5f;
    return d * sumi;
#else
    assert(false);
    return 0.f;
#endif
}

static __dpct_inline__ float
vec_dot_iq3_s_q8_1(const void *__restrict__ vbq,
                   const block_q8_1 *__restrict__ bq8_1, const int &iqs,
                   const uint32_t *iq3s_grid) {
    const block_iq3_s * bq2 = (const block_iq3_s *) vbq;

    const int ib32 = iqs;
    const uint8_t * qs = bq2->qs + 8*ib32;
    const int8_t  * q8 = bq8_1[ib32].qs;
    int sumi = 0;
    for (int l = 0; l < 4; ++l) {
        const uint32_t * grid1 = iq3s_grid + (qs[2*l+0] | ((bq2->qh[ib32] << (8 - 2*l)) & 256));
        const uint32_t * grid2 = iq3s_grid + (qs[2*l+1] | ((bq2->qh[ib32] << (7 - 2*l)) & 256));
        uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
            ((bq2->signs[4 * ib32 + l] & 0xf) * 0x01010101) & 0x08040201,
            0x08040201, std::equal_to<>());
        uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
            ((bq2->signs[4 * ib32 + l] >> 4) * 0x01010101) & 0x08040201,
            0x08040201, std::equal_to<>());
        const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
            grid1[0] ^ signs0, signs0, std::minus<>());
        const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
            grid2[0] ^ signs1, signs1, std::minus<>());
        sumi = dpct::dp4a(grid_l, *((int *)q8 + 0), sumi);
        sumi = dpct::dp4a(grid_h, *((int *)q8 + 1), sumi);
        q8 += 8;
    }
    const float d =
        (float)bq2->d *
        (1 + 2 * ((bq2->scales[ib32 / 2] >> 4 * (ib32 % 2)) & 0xf)) *
        bq8_1[ib32].ds[0];
    return d * sumi;
}

static __dpct_inline__ float
vec_dot_iq1_s_q8_1(const void *__restrict__ vbq,
                   const block_q8_1 *__restrict__ bq8_1, const int &iqs,
                   const uint32_t *iq1s_grid_gpu) {
    const block_iq1_s * bq1 = (const block_iq1_s *) vbq;

    const int ib32 = iqs;
    int sumi = 0;
    const int * q8 = (const int *)bq8_1[ib32].qs;
    for (int l = 0; l < 4; ++l) {
        const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[ib32] >> 3*l) & 7) << 8)));
        int grid0 = grid[0] & 0x0f0f0f0f;
        int grid1 = (grid[0] >> 4) & 0x0f0f0f0f;
        sumi = dpct::dp4a(q8[2 * l + 1], grid1,
                          dpct::dp4a(q8[2 * l + 0], grid0, sumi));
    }

    const float delta = bq1->qh[ib32] & 0x8000 ? -1-IQ1S_DELTA : -1+IQ1S_DELTA;
    const float d1q = (float)bq1->d * (2*((bq1->qh[ib32] >> 12) & 7) + 1);
    const float d = d1q * bq8_1[ib32].ds[0];
    const float m = d1q * bq8_1[ib32].ds[1];
    return d * sumi + m * delta;
}

static __dpct_inline__ float
vec_dot_iq1_m_q8_1(const void *__restrict__ vbq,
                   const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_iq1_m * bq1 = (const block_iq1_m *) vbq;

    const int ib32 = iqs;
    int   sumi[2] = {0, 0};
    float sumf[2] = {0.f, 0.f};
    const int * q8 = (const int *)bq8_1[ib32].qs;
    for (int l = 0; l < 4; ++l) {
        const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 7) << 8)));
        int grid0 = grid[0] & 0x0f0f0f0f;
        int grid1 = (grid[0] >> 4) & 0x0f0f0f0f;
        sumi[l / 2] = dpct::dp4a(q8[2 * l + 1], grid1,
                                 dpct::dp4a(q8[2 * l + 0], grid0, sumi[l / 2]));
        const float delta = (bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 0x08 ? -1-IQ1M_DELTA : -1+IQ1M_DELTA;
        const int sumy = dpct::dp4a(q8[2 * l + 1], 0x01010101,
                                    dpct::dp4a(q8[2 * l + 0], 0x01010101, 0));
        sumf[l/2] += delta*sumy;
    }

    iq1m_scale_t scale;
    const uint16_t * sc = (const uint16_t *)bq1->scales;
    scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
    const float d = (float)scale.f16 * bq8_1[ib32].ds[0];
    return d * ((sumi[0] + sumf[0]) * (2*((sc[ib32/2] >> 6*(ib32%2)) & 0x7) + 1) + (sumi[1] + sumf[1]) * (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1));
}

static __dpct_inline__ void get_int_from_table_16(const uint32_t &q4,
                                                  const uint8_t *values,
                                                  int &val1, int &val2) {
    uint32_t aux32;
    const uint8_t * q8 = (const uint8_t *)&aux32;

    aux32 = q4 & 0x0f0f0f0f;
    uint16_t v1 = values[q8[0]] | (values[q8[1]] << 8);
    uint16_t v2 = values[q8[2]] | (values[q8[3]] << 8);
    val1 = v1 | (v2 << 16);

    aux32 = (q4 >> 4) & 0x0f0f0f0f;
    v1 = values[q8[0]] | (values[q8[1]] << 8);
    v2 = values[q8[2]] | (values[q8[3]] << 8);
    val2 = v1 | (v2 << 16);
}
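
// Editor's usage sketch for get_int_from_table_16: q4 holds eight packed 4-bit
// indices and values is a 16-entry LUT (kvalues_iq4nl for the iq4 types). After
// the call, byte lane j of val1 is values[low nibble of byte j of q4] and byte
// lane j of val2 is values[high nibble of byte j]. The byte-pointer aliasing of
// aux32 assumes a little-endian target. Hypothetical check:
//   int lo, hi;
//   get_int_from_table_16(0x73625140u, (const uint8_t *)kvalues_iq4nl, lo, hi);
//   // low nibbles 0,1,2,3  -> lo lanes = values[0..3]
//   // high nibbles 4,5,6,7 -> hi lanes = values[4..7]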
static __dpct_inline__ float
vec_dot_iq4_nl_q8_1(const void *__restrict__ vbq,
                    const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_iq4_nl * bq = (const block_iq4_nl *) vbq;

    const uint16_t * q4 = (const uint16_t *)bq->qs + 2*iqs;
    const int32_t  * q8 = (const int32_t *)bq8_1->qs + iqs;

    const uint8_t * values = (const uint8_t *)kvalues_iq4nl;

    int v1, v2;
    int sumi1 = 0, sumi2 = 0;
    for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) {
        const uint32_t aux = q4[2*l] | (q4[2*l+1] << 16);
        get_int_from_table_16(aux, values, v1, v2);
        sumi1 = dpct::dp4a(v1, q8[l + 0], sumi1);
        sumi2 = dpct::dp4a(v2, q8[l + 4], sumi2);
    }

    const float d = (float)bq->d * bq8_1->ds[0];
    return d * (sumi1 + sumi2);
}

static __dpct_inline__ float
vec_dot_iq4_xs_q8_1(const void *__restrict__ vbq,
                    const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
    const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq;
    const uint8_t * values = (const uint8_t *)kvalues_iq4nl;

    // iqs is 0...7
    const int ib32 = iqs;
    const int32_t  * q8 = (const int *)bq8_1[ib32].qs;
    const uint32_t * q4 = (const uint32_t *)bq4->qs + 4*ib32;
    const int8_t ls = ((bq4->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((bq4->scales_h >> 2*ib32) & 3) << 4);
    const float d = (float)bq4->d * (ls - 32) * bq8_1[ib32].ds[0];
    int v1, v2;
    int sumi1 = 0, sumi2 = 0;
    for (int j = 0; j < 4; ++j) {
        get_int_from_table_16(q4[j], values, v1, v2);
        sumi1 = dpct::dp4a(v1, q8[j + 0], sumi1);
        sumi2 = dpct::dp4a(v2, q8[j + 4], sumi2);
    }
    return d * (sumi1 + sumi2);
}

template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x,
          int mmq_y, int nwarps, load_tiles_sycl_t load_tiles, int vdr,
          vec_dot_q_mul_mat_sycl_t vec_dot>
/*
DPCT1110:8: The total declared local variable size in device function mul_mat_q
exceeds 128 bytes and may cause high register pressure. Consult with your
hardware vendor to find the total register size available and adjust the code,
or use smaller sub-group size to avoid high register pressure.
*/
static __dpct_inline__ void
mul_mat_q(const void *__restrict__ vx, const void *__restrict__ vy,
          float *__restrict__ dst, const int ncols_x, const int nrows_x,
          const int ncols_y, const int nrows_y, const int nrows_dst,
          int *tile_x_ql, sycl::half2 *tile_x_dm, int *tile_x_qh,
          int *tile_x_sc, const sycl::nd_item<3> &item_ct1, int *tile_y_qs,
          sycl::half2 *tile_y_ds) {
    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    const int blocks_per_row_x = ncols_x / qk;
    const int blocks_per_col_y = nrows_y / QK8_1;
    const int blocks_per_warp = WARP_SIZE / qi;

    const int & ncols_dst = ncols_y;

    const int row_dst_0 = item_ct1.get_group(2) * mmq_y;
    const int & row_x_0 = row_dst_0;

    const int col_dst_0 = item_ct1.get_group(1) * mmq_x;
    const int & col_y_0 = col_dst_0;

    float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}};

    for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {

        load_tiles(x + row_x_0 * blocks_per_row_x + ib0, tile_x_ql, tile_x_dm,
                   tile_x_qh, tile_x_sc, item_ct1.get_local_id(1),
                   nrows_x - row_x_0 - 1, item_ct1.get_local_id(2),
                   blocks_per_row_x);

#pragma unroll
        for (int ir = 0; ir < qr; ++ir) {
            const int kqs = ir * WARP_SIZE + item_ct1.get_local_id(2);
            const int kbxd = kqs / QI8_1;

#pragma unroll
            for (int i = 0; i < mmq_x; i += nwarps) {
                const int col_y_eff = dpct::min(
                    (unsigned int)(col_y_0 + item_ct1.get_local_id(1) + i),
                    ncols_y - 1); // to prevent out-of-bounds memory accesses

                const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];

                const int index_y = (item_ct1.get_local_id(1) + i) * WARP_SIZE +
                                    kqs % WARP_SIZE;
                tile_y_qs[index_y] = get_int_from_int8_aligned(
                    by0->qs, item_ct1.get_local_id(2) % QI8_1);
            }

#pragma unroll
            for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
                const int ids =
                    (ids0 + item_ct1.get_local_id(1) * QI8_1 +
                     item_ct1.get_local_id(2) / (WARP_SIZE / QI8_1)) %
                    mmq_x;
                const int kby = item_ct1.get_local_id(2) % (WARP_SIZE / QI8_1);
                const int col_y_eff = sycl::min(col_y_0 + ids, ncols_y - 1);

                // if the sum is not needed it's faster to transform the scale to f32 ahead of time
                const sycl::half2 *dsi_src =
                    &y[col_y_eff * blocks_per_col_y + ib0 * (qk / QK8_1) +
                       ir * (WARP_SIZE / QI8_1) + kby]
                         .ds;
                sycl::half2 *dsi_dst =
                    &tile_y_ds[ids * (WARP_SIZE / QI8_1) + kby];
                if (need_sum) {
                    *dsi_dst = *dsi_src;
                } else {
                    float * dfi_dst = (float *) dsi_dst;
                    *dfi_dst = (*dsi_src)[0];
                }
            }

            /*
            DPCT1118:9: SYCL group functions and algorithms must be encountered
            in converged control flow. You may need to adjust the code.
            */
            /*
            DPCT1065:56: Consider replacing sycl::nd_item::barrier() with
            sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
            better performance if there is no access to global memory.
            */
            item_ct1.barrier();

            // #pragma unroll // unrolling this loop causes too much register pressure
            for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
#pragma unroll
                for (int j = 0; j < mmq_x; j += nwarps) {
#pragma unroll
                    for (int i = 0; i < mmq_y; i += WARP_SIZE) {
                        sum[i / WARP_SIZE][j / nwarps] += vec_dot(
                            tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
                            tile_y_qs, tile_y_ds, item_ct1.get_local_id(2) + i,
                            item_ct1.get_local_id(1) + j, k);
                    }
                }
            }

            /*
            DPCT1118:10: SYCL group functions and algorithms must be encountered
            in converged control flow. You may need to adjust the code.
            */
            /*
            DPCT1065:57: Consider replacing sycl::nd_item::barrier() with
            sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
            better performance if there is no access to global memory.
            */
            item_ct1.barrier();
        }
    }

#pragma unroll
    for (int j = 0; j < mmq_x; j += nwarps) {
        const int col_dst = col_dst_0 + j + item_ct1.get_local_id(1);

        if (col_dst >= ncols_dst) {
            return;
        }

#pragma unroll
        for (int i = 0; i < mmq_y; i += WARP_SIZE) {
            const int row_dst = row_dst_0 + item_ct1.get_local_id(2) + i;

            if (row_dst >= nrows_dst) {
                continue;
            }

            dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
        }
    }
}
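
// Editor's note on the DPCT1065 hints in mul_mat_q above: when a barrier only
// needs to order accesses to the work-group-local tile buffers, dpct suggests
// narrowing the fence, e.g.:
//   item_ct1.barrier(sycl::access::fence_space::local_space);
// Left unapplied here to keep the generated code's behavior unchanged.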
#define MMQ_X_Q4_0_RDNA2 64
#define MMQ_Y_Q4_0_RDNA2 128
#define NWARPS_Q4_0_RDNA2 8
#define MMQ_X_Q4_0_RDNA1 64
#define MMQ_Y_Q4_0_RDNA1 64
#define NWARPS_Q4_0_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q4_0_AMPERE 4
#define MMQ_Y_Q4_0_AMPERE 32
#define NWARPS_Q4_0_AMPERE 4
#else
#define MMQ_X_Q4_0_AMPERE 64
#define MMQ_Y_Q4_0_AMPERE 128
#define NWARPS_Q4_0_AMPERE 4
#endif
#define MMQ_X_Q4_0_PASCAL 64
#define MMQ_Y_Q4_0_PASCAL 64
#define NWARPS_Q4_0_PASCAL 8

template <bool need_check> static void
mul_mat_q4_0(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_qs_q4_0, float *tile_x_d_q4_0,
    int *tile_y_qs, sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q4_0_AMPERE;
    const int mmq_y  = MMQ_Y_Q4_0_AMPERE;
    const int nwarps = NWARPS_Q4_0_AMPERE;
    allocate_tiles_q4_0<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_qs_q4_0, tile_x_d_q4_0);
    mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps,
              load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ,
              vec_dot_q4_0_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q4_1_RDNA2 64
#define MMQ_Y_Q4_1_RDNA2 128
#define NWARPS_Q4_1_RDNA2 8
#define MMQ_X_Q4_1_RDNA1 64
#define MMQ_Y_Q4_1_RDNA1 64
#define NWARPS_Q4_1_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q4_1_AMPERE 4
#define MMQ_Y_Q4_1_AMPERE 32
#define NWARPS_Q4_1_AMPERE 4
#else
#define MMQ_X_Q4_1_AMPERE 64
#define MMQ_Y_Q4_1_AMPERE 128
#define NWARPS_Q4_1_AMPERE 4
#endif
#define MMQ_X_Q4_1_PASCAL 64
#define MMQ_Y_Q4_1_PASCAL 64
#define NWARPS_Q4_1_PASCAL 8

template <bool need_check> static void
mul_mat_q4_1(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_qs_q4_1,
    sycl::half2 *tile_x_dm_q4_1, int *tile_y_qs, sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q4_1_AMPERE;
    const int mmq_y  = MMQ_Y_Q4_1_AMPERE;
    const int nwarps = NWARPS_Q4_1_AMPERE;
    allocate_tiles_q4_1<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_qs_q4_1, tile_x_dm_q4_1);
    mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps,
              load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ,
              vec_dot_q4_1_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q5_0_RDNA2 64
#define MMQ_Y_Q5_0_RDNA2 128
#define NWARPS_Q5_0_RDNA2 8
#define MMQ_X_Q5_0_RDNA1 64
#define MMQ_Y_Q5_0_RDNA1 64
#define NWARPS_Q5_0_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q5_0_AMPERE 4
#define MMQ_Y_Q5_0_AMPERE 32
#define NWARPS_Q5_0_AMPERE 4
#else
#define MMQ_X_Q5_0_AMPERE 128
#define MMQ_Y_Q5_0_AMPERE 64
#define NWARPS_Q5_0_AMPERE 4
#endif
#define MMQ_X_Q5_0_PASCAL 64
#define MMQ_Y_Q5_0_PASCAL 64
#define NWARPS_Q5_0_PASCAL 8

template <bool need_check> static void
mul_mat_q5_0(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q5_0, float *tile_x_d_q5_0,
    int *tile_y_qs, sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q5_0_AMPERE;
    const int mmq_y  = MMQ_Y_Q5_0_AMPERE;
    const int nwarps = NWARPS_Q5_0_AMPERE;
    allocate_tiles_q5_0<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql_q5_0, tile_x_d_q5_0);
    mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps,
              load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ,
              vec_dot_q5_0_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q5_1_RDNA2 64
#define MMQ_Y_Q5_1_RDNA2 128
#define NWARPS_Q5_1_RDNA2 8
#define MMQ_X_Q5_1_RDNA1 64
#define MMQ_Y_Q5_1_RDNA1 64
#define NWARPS_Q5_1_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q5_1_AMPERE 4
#define MMQ_Y_Q5_1_AMPERE 32
#define NWARPS_Q5_1_AMPERE 4
#else
#define MMQ_X_Q5_1_AMPERE 128
#define MMQ_Y_Q5_1_AMPERE 64
#define NWARPS_Q5_1_AMPERE 4
#endif
#define MMQ_X_Q5_1_PASCAL 64
#define MMQ_Y_Q5_1_PASCAL 64
#define NWARPS_Q5_1_PASCAL 8

template <bool need_check> static void
mul_mat_q5_1(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q5_1,
    sycl::half2 *tile_x_dm_q5_1, int *tile_y_qs, sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q5_1_AMPERE;
    const int mmq_y  = MMQ_Y_Q5_1_AMPERE;
    const int nwarps = NWARPS_Q5_1_AMPERE;
    allocate_tiles_q5_1<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql_q5_1, tile_x_dm_q5_1);
    mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps,
              load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ,
              vec_dot_q5_1_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q8_0_RDNA2 64
#define MMQ_Y_Q8_0_RDNA2 128
#define NWARPS_Q8_0_RDNA2 8
#define MMQ_X_Q8_0_RDNA1 64
#define MMQ_Y_Q8_0_RDNA1 64
#define NWARPS_Q8_0_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q8_0_AMPERE 4
#define MMQ_Y_Q8_0_AMPERE 32
#define NWARPS_Q8_0_AMPERE 4
#else
#define MMQ_X_Q8_0_AMPERE 128
#define MMQ_Y_Q8_0_AMPERE 64
#define NWARPS_Q8_0_AMPERE 4
#endif
#define MMQ_X_Q8_0_PASCAL 64
#define MMQ_Y_Q8_0_PASCAL 64
#define NWARPS_Q8_0_PASCAL 8

template <bool need_check> static void
mul_mat_q8_0(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_qs_q8_0, float *tile_x_d_q8_0,
    int *tile_y_qs, sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q8_0_AMPERE;
    const int mmq_y  = MMQ_Y_Q8_0_AMPERE;
    const int nwarps = NWARPS_Q8_0_AMPERE;
    allocate_tiles_q8_0<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_qs_q8_0, tile_x_d_q8_0);
    mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps,
              load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ,
              vec_dot_q8_0_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q2_K_RDNA2 64
#define MMQ_Y_Q2_K_RDNA2 128
#define NWARPS_Q2_K_RDNA2 8
#define MMQ_X_Q2_K_RDNA1 128
#define MMQ_Y_Q2_K_RDNA1 32
#define NWARPS_Q2_K_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q2_K_AMPERE 4
#define MMQ_Y_Q2_K_AMPERE 32
#define NWARPS_Q2_K_AMPERE 4
#else
#define MMQ_X_Q2_K_AMPERE 64
#define MMQ_Y_Q2_K_AMPERE 128
#define NWARPS_Q2_K_AMPERE 4
#endif
#define MMQ_X_Q2_K_PASCAL 64
#define MMQ_Y_Q2_K_PASCAL 64
#define NWARPS_Q2_K_PASCAL 8

template <bool need_check> static void
mul_mat_q2_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q2_K,
    sycl::half2 *tile_x_dm_q2_K, int *tile_x_sc_q2_K, int *tile_y_qs,
    sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q2_K_AMPERE;
    const int mmq_y  = MMQ_Y_Q2_K_AMPERE;
    const int nwarps = NWARPS_Q2_K_AMPERE;
    allocate_tiles_q2_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql_q2_K, tile_x_dm_q2_K, tile_x_sc_q2_K);
    mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps,
              load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ,
              vec_dot_q2_K_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q3_K_RDNA2 128
#define MMQ_Y_Q3_K_RDNA2 64
#define NWARPS_Q3_K_RDNA2 8
#define MMQ_X_Q3_K_RDNA1 32
#define MMQ_Y_Q3_K_RDNA1 128
#define NWARPS_Q3_K_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q3_K_AMPERE 4
#define MMQ_Y_Q3_K_AMPERE 32
#define NWARPS_Q3_K_AMPERE 4
#else
#define MMQ_X_Q3_K_AMPERE 128
#define MMQ_Y_Q3_K_AMPERE 128
#define NWARPS_Q3_K_AMPERE 4
#endif
#define MMQ_X_Q3_K_PASCAL 64
#define MMQ_Y_Q3_K_PASCAL 64
#define NWARPS_Q3_K_PASCAL 8

template <bool need_check> static void
mul_mat_q3_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q3_K,
    sycl::half2 *tile_x_dm_q3_K, int *tile_x_qh_q3_K, int *tile_x_sc_q3_K,
    int *tile_y_qs, sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q3_K_AMPERE;
    const int mmq_y  = MMQ_Y_Q3_K_AMPERE;
    const int nwarps = NWARPS_Q3_K_AMPERE;
    allocate_tiles_q3_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql_q3_K, tile_x_dm_q3_K, tile_x_qh_q3_K,
                               tile_x_sc_q3_K);
    mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps,
              load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ,
              vec_dot_q3_K_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q4_K_RDNA2 64
#define MMQ_Y_Q4_K_RDNA2 128
#define NWARPS_Q4_K_RDNA2 8
#define MMQ_X_Q4_K_RDNA1 32
#define MMQ_Y_Q4_K_RDNA1 64
#define NWARPS_Q4_K_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q4_K_AMPERE 4
#define MMQ_Y_Q4_K_AMPERE 32
#define NWARPS_Q4_K_AMPERE 4
#else
#define MMQ_X_Q4_K_AMPERE 64
#define MMQ_Y_Q4_K_AMPERE 128
#define NWARPS_Q4_K_AMPERE 4
#endif
#define MMQ_X_Q4_K_PASCAL 64
#define MMQ_Y_Q4_K_PASCAL 64
#define NWARPS_Q4_K_PASCAL 8

template <bool need_check> static void
mul_mat_q4_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q4_K,
    sycl::half2 *tile_x_dm_q4_K, int *tile_x_sc_q4_K, int *tile_y_qs,
    sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q4_K_AMPERE;
    const int mmq_y  = MMQ_Y_Q4_K_AMPERE;
    const int nwarps = NWARPS_Q4_K_AMPERE;
    allocate_tiles_q4_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql_q4_K, tile_x_dm_q4_K, tile_x_sc_q4_K);
    mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps,
              load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ,
              vec_dot_q4_K_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q5_K_RDNA2 64
#define MMQ_Y_Q5_K_RDNA2 128
#define NWARPS_Q5_K_RDNA2 8
#define MMQ_X_Q5_K_RDNA1 32
#define MMQ_Y_Q5_K_RDNA1 64
#define NWARPS_Q5_K_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q5_K_AMPERE 4
#define MMQ_Y_Q5_K_AMPERE 32
#define NWARPS_Q5_K_AMPERE 4
#else
#define MMQ_X_Q5_K_AMPERE 64
#define MMQ_Y_Q5_K_AMPERE 128
#define NWARPS_Q5_K_AMPERE 4
#endif
#define MMQ_X_Q5_K_PASCAL 64
#define MMQ_Y_Q5_K_PASCAL 64
#define NWARPS_Q5_K_PASCAL 8

template <bool need_check> static void
mul_mat_q5_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q5_K,
    sycl::half2 *tile_x_dm_q5_K, int *tile_x_sc_q5_K, int *tile_y_qs,
    sycl::half2 *tile_y_ds) {
    int * tile_x_ql = nullptr;
    sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q5_K_AMPERE;
    const int mmq_y  = MMQ_Y_Q5_K_AMPERE;
    const int nwarps = NWARPS_Q5_K_AMPERE;
    allocate_tiles_q5_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql_q5_K, tile_x_dm_q5_K, tile_x_sc_q5_K);
    mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps,
              load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ,
              vec_dot_q5_K_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

#define MMQ_X_Q6_K_RDNA2 64
#define MMQ_Y_Q6_K_RDNA2 128
#define NWARPS_Q6_K_RDNA2 8
#define MMQ_X_Q6_K_RDNA1 32
#define MMQ_Y_Q6_K_RDNA1 64
#define NWARPS_Q6_K_RDNA1 8
#if defined(SYCL_USE_XMX)
#define MMQ_X_Q6_K_AMPERE 4
#define MMQ_Y_Q6_K_AMPERE 32
#define NWARPS_Q6_K_AMPERE 4
#else
#define MMQ_X_Q6_K_AMPERE 64
#define MMQ_Y_Q6_K_AMPERE 64
#define NWARPS_Q6_K_AMPERE 4
#endif
#define MMQ_X_Q6_K_PASCAL 64
#define MMQ_Y_Q6_K_PASCAL 64
#define NWARPS_Q6_K_PASCAL 8

template <bool need_check> static void
mul_mat_q6_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
    const sycl::nd_item<3> &item_ct1, int *tile_x_ql, sycl::half2 *tile_x_dm,
    int *tile_x_sc, int *tile_y_qs, sycl::half2 *tile_y_ds) {
    // int * tile_x_ql = nullptr;
    // sycl::half2 *tile_x_dm = nullptr;
    int * tile_x_qh = nullptr;
    // int * tile_x_sc = nullptr;

    //sycl_todo: change according to hardware
    const int mmq_x  = MMQ_X_Q6_K_AMPERE;
    const int mmq_y  = MMQ_Y_Q6_K_AMPERE;
    const int nwarps = NWARPS_Q6_K_AMPERE;
    allocate_tiles_q6_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
                               tile_x_ql, tile_x_dm, tile_x_sc);
    mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps,
              load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ,
              vec_dot_q6_K_q8_1_mul_mat>(
        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
        tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
}

template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_sycl_t vec_dot_q_sycl>
static void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows,
                          const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;
    const int qi_vdr = (qi / vdr); // N_threads processing 1 qk block

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / qi_vdr; i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row * blocks_per_row + i; // x block index

        const int iby = i * (qk / QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr *
            (item_ct1.get_local_id(2) -
             i * qi_vdr); // x block quant index when casting the quants to int

        tmp += vec_dot_q_sycl(&x[ibx], &y[iby], iqs);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}
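
// Editor's sketch: the XOR loop above is a butterfly reduction across a
// sub-group, with dpct::permute_sub_group_by_xor playing the role of CUDA's
// __shfl_xor_sync. Host-side scalar model, assuming WARP_SIZE == 32:
static inline float subgroup_sum_ref(const float * lane_vals) {
    float v[32];
    for (int l = 0; l < 32; ++l) { v[l] = lane_vals[l]; }
    for (int mask = 16; mask > 0; mask >>= 1) {
        float nxt[32];
        for (int l = 0; l < 32; ++l) { nxt[l] = v[l] + v[l ^ mask]; } // exchange with partner lane
        for (int l = 0; l < 32; ++l) { v[l] = nxt[l]; }
    }
    return v[0]; // after the last step every lane holds the full sum
}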
template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq2_xxs_q8_1(const void *__restrict__ vx,
                                       const void *__restrict__ vy,
                                       float *__restrict__ dst, const int ncols,
                                       const int nrows,
                                       const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq2_xxs_q8_1(&x[ibx], &y[iby], iqs, iq2xxs_grid, ksigns_iq2xs, kmask_iq2xs);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq2_xs_q8_1(const void *__restrict__ vx,
                                      const void *__restrict__ vy,
                                      float *__restrict__ dst, const int ncols,
                                      const int nrows,
                                      const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq2_xs_q8_1(&x[ibx], &y[iby], iqs, iq2xs_grid, ksigns64);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq2_s_q8_1(const void *__restrict__ vx,
                                     const void *__restrict__ vy,
                                     float *__restrict__ dst, const int ncols,
                                     const int nrows,
                                     const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq2_s_q8_1(&x[ibx], &y[iby], iqs);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq3_xxs_q8_1(const void *__restrict__ vx,
                                       const void *__restrict__ vy,
                                       float *__restrict__ dst, const int ncols,
                                       const int nrows,
                                       const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq3_xxs_q8_1(&x[ibx], &y[iby], iqs, iq3xxs_grid, ksigns64);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq3_s_q8_1(const void *__restrict__ vx,
                                     const void *__restrict__ vy,
                                     float *__restrict__ dst, const int ncols,
                                     const int nrows,
                                     const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq3_s_q8_1(&x[ibx], &y[iby], iqs, iq3s_grid);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq1_s_q8_1(const void *__restrict__ vx,
                                     const void *__restrict__ vy,
                                     float *__restrict__ dst, const int ncols,
                                     const int nrows,
                                     const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq1_s_q8_1(&x[ibx], &y[iby], iqs, iq1s_grid_gpu);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq1_m_q8_1(const void *__restrict__ vx,
                                     const void *__restrict__ vy,
                                     float *__restrict__ dst, const int ncols,
                                     const int nrows,
                                     const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq1_m_q8_1(&x[ibx], &y[iby], iqs);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq4_nl_q8_1(const void *__restrict__ vx,
                                      const void *__restrict__ vy,
                                      float *__restrict__ dst, const int ncols,
                                      const int nrows,
                                      const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq4_nl_q8_1(&x[ibx], &y[iby], iqs);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}

template <int qk, int qi, typename block_q_t, int vdr>
static void mul_mat_vec_q_iq4_xs_q8_1(const void *__restrict__ vx,
                                      const void *__restrict__ vy,
                                      float *__restrict__ dst, const int ncols,
                                      const int nrows,
                                      const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int blocks_per_row  = ncols / qk;
    const int blocks_per_warp = vdr * WARP_SIZE / qi;

    // partial sum for each thread
    float tmp = 0.0f;

    const block_q_t  * x = (const block_q_t  *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;

    for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
         i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i; // x block index

        const int iby = i * (qk/QK8_1); // y block index that aligns with ibx

        const int iqs =
            vdr * (item_ct1.get_local_id(2) %
                   (qi / vdr)); // x block quant index when casting the quants to int

        tmp += vec_dot_iq4_xs_q8_1(&x[ibx], &y[iby], iqs);
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[row] = tmp;
    }
}
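
// Dequantize-then-multiply mat-vec path: each thread dequantizes vals_per_iter
// values per column iteration and accumulates the products directly. With
// GGML_SYCL_F16 two running sums are kept in a sycl::half2 so half2 arithmetic
// can be used; they are added together on the final store.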
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows,
                                   const sycl::nd_item<3> &item_ct1) {
    // qk = quantized weights per x block
    // qr = number of quantized weights per data value in x block
    const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
                    item_ct1.get_local_id(1);

    if (row >= nrows) {
        return;
    }

    const int tid = item_ct1.get_local_id(2);

    const int iter_stride = 2*GGML_SYCL_DMMV_X;
    const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
    const int y_offset = qr == 1 ? 1 : qk/2;

// partial sum for each thread
#ifdef GGML_SYCL_F16
    sycl::half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
#else
    float tmp = 0.0f;
#endif // GGML_SYCL_F16

    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = (row*ncols + col)/qk; // x block index
        const int iqs = (col%qk)/qr; // x quant index
        const int iybs = col - col%qk; // y block start index

// processing >2 values per i iter is faster for fast GPUs
#pragma unroll
        for (int j = 0; j < vals_per_iter; j += 2) {
            // process 2 vals per j iter

            // dequantize
            // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
            dfloat2 v;
            dequantize_kernel(vx, ib, iqs + j/qr, v);

            // matrix multiplication
            // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
#ifdef GGML_SYCL_F16
            dfloat2 t1{y[iybs + iqs + j / qr + 0],
                       y[iybs + iqs + j / qr + y_offset]};

            tmp += v * t1;
#else
            tmp += v.x() * y[iybs + iqs + j / qr + 0];
            tmp += v.y() * y[iybs + iqs + j / qr + y_offset];
#endif // GGML_SYCL_F16
        }
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (tid == 0) {
#ifdef GGML_SYCL_F16
        dst[row] = tmp.x() + tmp.y();
#else
        dst[row] = tmp;
#endif // GGML_SYCL_F16
    }
}
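
// f16 x f32 mat-vec for src0 stored in the permuted (0, 2, 1, 3) layout:
// x is read transposed and permuted, y is read permuted, and dst is written
// contiguously, so no separate transpose pass is needed.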
static void mul_mat_p021_f16_f32(
    const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y,
    const sycl::nd_item<3> &item_ct1) {

    const sycl::half *x = (const sycl::half *)vx;

    const int row_x = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                      item_ct1.get_local_id(1);
    const int channel = item_ct1.get_local_range(0) * item_ct1.get_group(0) +
                        item_ct1.get_local_id(0);
    const int channel_x = channel / (nchannels_y / nchannels_x);

    const int nrows_y   = ncols_x;
    const int nrows_dst = nrows_x;
    const int row_dst   = row_x;

    float tmp = 0.0f;

    for (int col_x0 = 0; col_x0 < ncols_x;
         col_x0 += item_ct1.get_local_range(2)) {
        const int col_x = col_x0 + item_ct1.get_local_id(2);

        if (col_x >= ncols_x) {
            break;
        }

        // x is transposed and permuted
        const int ix = row_x*nchannels_x*ncols_x + channel_x*ncols_x + col_x;
        const float xi =
            sycl::vec<sycl::half, 1>(x[ix])
                .convert<float, sycl::rounding_mode::automatic>()[0];

        const int row_y = col_x;

        // y is not transposed but permuted
        const int iy = channel*nrows_y + row_y;

        tmp += xi * y[iy];
    }

    // dst is not transposed and not permuted
    const int idst = channel*nrows_dst + row_dst;

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[idst] = tmp;
    }
}
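
// Same reduction scheme as above, but x is addressed through explicit row and
// channel strides, so arbitrarily strided (non-contiguous) src0 layouts can be
// handled without a copy.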
static void mul_mat_vec_nc_f16_f32( // nc == non-contiguous
    const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x,
    const int row_stride_x, const int channel_stride_x, const int channel_x_divisor,
    const sycl::nd_item<3> &item_ct1) {

    const sycl::half *x = (const sycl::half *)vx;

    const int row_x = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                      item_ct1.get_local_id(1);
    const int channel = item_ct1.get_local_range(0) * item_ct1.get_group(0) +
                        item_ct1.get_local_id(0);
    const int channel_x = channel / channel_x_divisor;

    const int nrows_y   = ncols_x;
    const int nrows_dst = nrows_x;
    const int row_dst   = row_x;

    const int idst = channel*nrows_dst + row_dst;

    float tmp = 0.0f;

    for (int col_x0 = 0; col_x0 < ncols_x;
         col_x0 += item_ct1.get_local_range(2)) {
        const int col_x = col_x0 + item_ct1.get_local_id(2);

        if (col_x >= ncols_x) {
            break;
        }

        const int row_y = col_x;

        const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x;
        const int iy = channel*nrows_y + row_y;

        const float xi =
            sycl::vec<sycl::half, 1>(x[ix])
                .convert<float, sycl::rounding_mode::automatic>()[0];

        tmp += xi * y[iy];
    }

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp +=
            dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
    }

    if (item_ct1.get_local_id(2) == 0) {
        dst[idst] = tmp;
    }
}
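
// Scalar element converters used by the copy kernels below; each copies a
// single value, converting between f32/f16/i16/i32 where the types differ.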
static void cpy_1_f32_f32(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    float * dsti = (float *) cdsti;

    *dsti = *xi;
}

static void cpy_1_f32_f16(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    sycl::half *dsti = (sycl::half *)cdsti;

    *dsti = sycl::vec<float, 1>(*xi)
                .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
}

static void cpy_1_f16_f16(const char * cxi, char * cdsti) {
    const sycl::half *xi = (const sycl::half *)cxi;
    sycl::half *dsti = (sycl::half *)cdsti;

    *dsti = *xi;
}

static void cpy_1_f16_f32(const char * cxi, char * cdsti) {
    const sycl::half *xi = (const sycl::half *)cxi;
    float * dsti = (float *) cdsti;

    *dsti = *xi;
}

static void cpy_1_i16_i16(const char * cxi, char * cdsti) {
    const int16_t *xi = (const int16_t *)cxi;
    int16_t *dsti = (int16_t *)cdsti;

    *dsti = *xi;
}

static void cpy_1_i32_i32(const char * cxi, char * cdsti) {
    const int32_t *xi = (const int32_t *)cxi;
    int32_t *dsti = (int32_t *)cdsti;

    *dsti = *xi;
}
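
// Generic strided copy: one thread per element. The flat index i is unraveled
// into (i03, i02, i01, i00) for the source and (i13, i12, i11, i10) for the
// destination, then combined with the per-dimension byte strides nb*.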
template <cpy_kernel_t cpy_1>
static void cpy_f32_f16(const char * cx, char * cdst, const int ne,
                        const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
                        const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
                        const int nb12, const int nb13, const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= ne) {
        return;
    }

    // determine indices i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor
    // then combine those indices with the corresponding byte offsets to get the total offsets
    const int i03 = i/(ne00 * ne01 * ne02);
    const int i02 = (i - i03*ne00*ne01*ne02) / (ne00*ne01);
    const int i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00;
    const int i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
    const int x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;

    const int i13 = i/(ne10 * ne11 * ne12);
    const int i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
    const int i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
    const int i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
    const int dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13;

    cpy_1(cx + x_offset, cdst + dst_offset);
}
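
// Quantize-on-copy block writers: each converts one block of QK* floats into a
// quantized block. q8_0 uses d = amax/127 and rounds to int8; the q4 variants
// pack two 4-bit values per byte.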
static void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q8_0 * dsti = (block_q8_0 *) cdsti;

    float amax = 0.0f; // absolute max

    for (int j = 0; j < QK8_0; j++) {
        const float v = xi[j];
        amax = sycl::fmax(amax, sycl::fabs((float)v));
    }

    const float d  = amax / ((1 << 7) - 1);
    const float id = d ? 1.0f/d : 0.0f;

    dsti->d = d;

    for (int j = 0; j < QK8_0; ++j) {
        const float x0 = xi[j]*id;

        dsti->qs[j] = sycl::round((float)x0);
    }
}

static void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q4_0 * dsti = (block_q4_0 *) cdsti;

    float amax = 0.0f;
    float vmax = 0.0f;

    for (int j = 0; j < QK4_0; ++j) {
        const float v = xi[j];
        if (amax < sycl::fabs((float)v)) {
            amax = sycl::fabs((float)v);
            vmax = v;
        }
    }

    const float d  = vmax / -8;
    const float id = d ? 1.0f/d : 0.0f;

    dsti->d = d;

    for (int j = 0; j < QK4_0/2; ++j) {
        const float x0 = xi[0       + j]*id;
        const float x1 = xi[QK4_0/2 + j]*id;

        const uint8_t xi0 = dpct::min(15, (int8_t)(x0 + 8.5f));
        const uint8_t xi1 = dpct::min(15, (int8_t)(x1 + 8.5f));

        dsti->qs[j]  = xi0;
        dsti->qs[j] |= xi1 << 4;
    }
}

static void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q4_1 * dsti = (block_q4_1 *) cdsti;

    float vmin = FLT_MAX;
    float vmax = -FLT_MAX;

    for (int j = 0; j < QK4_1; ++j) {
        const float v = xi[j];

        if (v < vmin) vmin = v;
        if (v > vmax) vmax = v;
    }

    const float d  = (vmax - vmin) / ((1 << 4) - 1);
    const float id = d ? 1.0f/d : 0.0f;

    dsti->dm.x() = d;
    dsti->dm.y() = vmin;

    for (int j = 0; j < QK4_1/2; ++j) {
        const float x0 = (xi[0       + j] - vmin)*id;
        const float x1 = (xi[QK4_1/2 + j] - vmin)*id;

        const uint8_t xi0 = dpct::min(15, (int8_t)(x0 + 0.5f));
        const uint8_t xi1 = dpct::min(15, (int8_t)(x1 + 0.5f));

        dsti->qs[j]  = xi0;
        dsti->qs[j] |= xi1 << 4;
    }
}
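
// Block-quantizing copy: one thread per block of qk source elements. The
// destination offset divides i10 by qk because dst rows are stored as
// quantized blocks rather than individual values.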
template <cpy_kernel_t cpy_blck, int qk>
static void cpy_f32_q(const char * cx, char * cdst, const int ne,
                      const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
                      const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
                      const int nb12, const int nb13, const sycl::nd_item<3> &item_ct1) {
    const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                   item_ct1.get_local_id(2)) * qk;

    if (i >= ne) {
        return;
    }

    const int i03 = i/(ne00 * ne01 * ne02);
    const int i02 = (i - i03*ne00*ne01*ne02) / (ne00*ne01);
    const int i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00;
    const int i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
    const int x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;

    const int i13 = i/(ne10 * ne11 * ne12);
    const int i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
    const int i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
    const int i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
    const int dst_offset = (i10/qk)*nb10 + i11*nb11 + i12*nb12 + i13*nb13;

    cpy_blck(cx + x_offset, cdst + dst_offset);
}

static float rope_yarn_ramp(const float low, const float high, const int i0) {
    const float y = (i0 / 2 - low) / sycl::max(0.001f, high - low);
    return 1.0f - sycl::min(1.0f, sycl::max(0.0f, y));
}

struct rope_corr_dims {
    float v[4];
};

// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
    float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale,
    float * cos_theta, float * sin_theta
) {
    // Get n-d rotational scaling corrected for extrapolation
    float theta_interp = freq_scale * theta_extrap;
    float theta = theta_interp;
    if (ext_factor != 0.0f) {
        float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor;
        theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;

        // Get n-d magnitude scaling corrected for interpolation
        mscale *= 1.0f + 0.1f * sycl::log(1.0f / freq_scale);
    }
    *cos_theta = sycl::cos(theta) * mscale;
    *sin_theta = sycl::sin(theta) * mscale;
}

// rope == RoPE == rotary positional embedding
template<typename T, bool has_pos>
static void rope(
    const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
    float ext_factor, float attn_factor, rope_corr_dims corr_dims,
    const sycl::nd_item<3> &item_ct1) {
    const int col = 2 * (item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                         item_ct1.get_local_id(1));

    if (col >= ncols) {
        return;
    }

    const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                    item_ct1.get_local_id(2);
    const int i  = row*ncols + col;
    const int i2 = row/p_delta_rows;

    const int p = has_pos ? pos[i2] : 0;
    const float theta_base = p * dpct::pow(freq_base, -float(col) / ncols);

    float cos_theta, sin_theta;
    rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta);

    const float x0 = x[i + 0];
    const float x1 = x[i + 1];

    dst[i + 0] = x0*cos_theta - x1*sin_theta;
    dst[i + 1] = x0*sin_theta + x1*cos_theta;
}
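
// NeoX-style RoPE: rotates the pair (i, i + n_dims/2) instead of adjacent
// elements, and copies columns past n_dims through unchanged. When present,
// freq_factors divide the per-dimension base angle.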
template<typename T, bool has_pos, bool has_freq_facs>
static void rope_neox(
    const T * x, T * dst, int ncols, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows,
    float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, float inv_ndims,
    const float * freq_factors, const sycl::nd_item<3> &item_ct1) {
    const int col = 2 * (item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                         item_ct1.get_local_id(1));

    if (col >= ncols) {
        return;
    }

    const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                    item_ct1.get_local_id(2);
    const int ib = col / n_dims;
    const int ic = col % n_dims;

    if (ib > 0) {
        const int i = row*ncols + ib*n_dims + ic;

        dst[i + 0] = x[i + 0];
        dst[i + 1] = x[i + 1];

        return;
    }

    const int i  = row*ncols + ib*n_dims + ic/2;
    const int i2 = row/p_delta_rows;

    float cur_rot = inv_ndims * ic - ib;

    const int p = has_pos ? pos[i2] : 0;
    const float freq_factor = has_freq_facs ? freq_factors[ic/2] : 1.0f;

    const float theta_base =
        p * freq_scale * dpct::pow(theta_scale, col / 2.0f)/freq_factor;

    float cos_theta, sin_theta;
    rope_yarn(theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta);

    const float x0 = x[i + 0];
    const float x1 = x[i + n_dims/2];

    dst[i + 0]        = x0*cos_theta - x1*sin_theta;
    dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta;
}
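
// Row-wise sum: one work-group per row; the threads stride over the columns
// and a warp reduction leaves the final value in lane 0.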
static void k_sum_rows_f32(const float * x, float * dst, const int ncols,
                           const sycl::nd_item<3> &item_ct1) {
    const int row = item_ct1.get_group(1);
    const int col = item_ct1.get_local_id(2);

    float sum = 0.0f;
    for (int i = col; i < ncols; i += item_ct1.get_local_range(2)) {
        sum += x[row * ncols + i];
    }

    sum = warp_reduce_sum(sum, item_ct1);

    if (col == 0) {
        dst[row] = sum;
    }
}

template<typename T>
static inline void ggml_sycl_swap(T & a, T & b) {
    T tmp = a;
    a = b;
    b = tmp;
}
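
// Bitonic argsort over indices held in local memory. ncols_pad is expected to
// be a power of two >= ncols (an assumption implied by the bitonic network
// below); padding slots (index >= ncols) always compare as "greater", so they
// sink to the end and are skipped when the result is copied out.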
template <ggml_sort_order order>
__dpct_inline__ static void
k_argsort_f32_i32(const float *x, int *dst, const int ncols, int ncols_pad,
                  const sycl::nd_item<3> &item_ct1, uint8_t *dpct_local) {
    // bitonic sort
    int col = item_ct1.get_local_id(2);
    int row = item_ct1.get_group(1);

    if (col >= ncols_pad) {
        return;
    }

    const float * x_row = x + row * ncols;
    auto dst_row = (int *)dpct_local;

    // initialize indices
    dst_row[col] = col;

    item_ct1.barrier(sycl::access::fence_space::local_space);

    for (int k = 2; k <= ncols_pad; k *= 2) {
        for (int j = k / 2; j > 0; j /= 2) {
            int ixj = col ^ j;
            if (ixj > col) {
                if ((col & k) == 0) {
                    if (dst_row[col] >= ncols ||
                        (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ?
                            x_row[dst_row[col]] > x_row[dst_row[ixj]] :
                            x_row[dst_row[col]] < x_row[dst_row[ixj]]))
                    ) {
                        ggml_sycl_swap(dst_row[col], dst_row[ixj]);
                    }
                } else {
                    if (dst_row[ixj] >= ncols ||
                        (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ?
                            x_row[dst_row[col]] < x_row[dst_row[ixj]] :
                            x_row[dst_row[col]] > x_row[dst_row[ixj]]))
                    ) {
                        ggml_sycl_swap(dst_row[col], dst_row[ixj]);
                    }
                }
            }
            /*
            DPCT1118:1: SYCL group functions and algorithms must be encountered
            in converged control flow. You may need to adjust the code.
            */
            item_ct1.barrier(sycl::access::fence_space::local_space);
        }
    }

    // copy the result to dst without the padding
    if (col < ncols) {
        dst[row * ncols + col] = dst_row[col];
    }
}

static void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past,
                              const sycl::nd_item<3> &item_ct1) {
    const int col = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                    item_ct1.get_local_id(2);

    if (col >= ncols) {
        return;
    }

    const int i = row*ncols + col;
    //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i];
    //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU
    dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX;
}
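
// Fused softmax: vals holds x*scale + slope*mask, where slope implements the
// ALiBi bias when max_bias > 0. Three passes follow: a block-wide max, a sum
// of exp(v - max), and normalization; buf is only needed for the cross-warp
// reductions when block_size > WARP_SIZE.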
template <bool vals_smem, int ncols_template, int block_size_template>
static void soft_max_f32(const float * x, const float * mask, float * dst, const int ncols_par,
                         const int nrows_y, const float scale, const float max_bias, const float m0,
                         const float m1, uint32_t n_head_log2, const sycl::nd_item<3> &item_ct1, float *buf) {
    const int ncols = ncols_template == 0 ? ncols_par : ncols_template;

    const int tid  = item_ct1.get_local_id(2);
    const int rowx = item_ct1.get_group(2);
    const int rowy = rowx % nrows_y; // broadcast the mask (y) in the row dimension

    const int block_size = block_size_template == 0 ? item_ct1.get_local_range(2) : block_size_template;

    const int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
    const int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;

    float slope = 1.0f;

    // ALiBi
    if (max_bias > 0.0f) {
        const uint32_t h = rowx/nrows_y; // head index

        const float base = h < n_head_log2 ? m0 : m1;
        const int   exp  = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;

        slope = sycl::pow(base, float(exp));
    }

    float * vals = vals_smem ? buf + WARP_SIZE : dst + rowx*ncols;
    float max_val = -INFINITY;

    for (int col0 = 0; col0 < ncols; col0 += block_size) {
        const int col = col0 + tid;

        if (ncols_template == 0 && col >= ncols) {
            break;
        }

        const int ix = rowx*ncols + col;
        const int iy = rowy*ncols + col;

        const float val = x[ix]*scale + (mask ? slope*mask[iy] : 0.0f);

        vals[col] = val;
        max_val = sycl::max(max_val, val);
    }

    // find the max value in the block
    max_val = warp_reduce_max(max_val, item_ct1);
    if (block_size > WARP_SIZE) {
        if (warp_id == 0) {
            buf[lane_id] = -INFINITY;
        }
        item_ct1.barrier(sycl::access::fence_space::local_space);

        if (lane_id == 0) {
            buf[warp_id] = max_val;
        }
        item_ct1.barrier(sycl::access::fence_space::local_space);

        max_val = buf[lane_id];
        max_val = warp_reduce_max(max_val, item_ct1);
    }

    float tmp = 0.f;

#pragma unroll
    for (int col0 = 0; col0 < ncols; col0 += block_size) {
        const int col = col0 + tid;

        if (ncols_template == 0 && col >= ncols) {
            break;
        }

        const float val = sycl::native::exp(vals[col] - max_val);
        tmp += val;
        vals[col] = val;
    }

    // find the sum of exps in the block
    tmp = warp_reduce_sum(tmp, item_ct1);
    if (block_size > WARP_SIZE) {
        item_ct1.barrier(sycl::access::fence_space::local_space);
        if (warp_id == 0) {
            buf[lane_id] = 0.f;
        }
        item_ct1.barrier(sycl::access::fence_space::local_space);

        if (lane_id == 0) {
            buf[warp_id] = tmp;
        }
        item_ct1.barrier(sycl::access::fence_space::local_space);

        tmp = buf[lane_id];
        tmp = warp_reduce_sum(tmp, item_ct1);
    }

    const float inv_sum = 1.f / tmp;

#pragma unroll
    for (int col0 = 0; col0 < ncols; col0 += block_size) {
        const int col = col0 + tid;

        if (ncols_template == 0 && col >= ncols) {
            return;
        }

        const int idst = rowx*ncols + col;
        dst[idst] = vals[col] * inv_sum;
    }
}

static void scale_f32(const float * x, float * dst, const float scale, const int k,
                      const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }

    dst[i] = scale * x[i];
}

static void clamp_f32(const float * x, float * dst, const float min, const float max, const int k,
                      const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                  item_ct1.get_local_id(2);

    if (i >= k) {
        return;
    }

    dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
}
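
// im2col: each thread writes one element of the unfolded matrix, mapping its
// flat index to a kernel tap (kx, ky) and output column ix; taps that fall
// outside the padded input write 0.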
template <typename T>
static void im2col_kernel(const float *x, T *dst, int offset_delta,
                          int IW, int IH, int OW, int KW, int KH,
                          int pelements, int CHW, int s0, int s1, int p0,
                          int p1, int d0, int d1,
                          const sycl::nd_item<3> &item_ct1) {
    const int i = item_ct1.get_local_id(2) +
                  item_ct1.get_group(2) * item_ct1.get_local_range(2);
    if (i >= pelements) {
        return;
    }

    const int ksize = OW * (KH > 1 ? KW : 1);
    const int kx = i / ksize;
    const int kd = kx * ksize;
    const int ky = (i - kd) / OW;
    const int ix = i % OW;

    const int64_t iiw = ix * s0 + kx * d0 - p0;
    const int64_t iih = item_ct1.get_group(1) * s1 + ky * d1 - p1;

    const int64_t offset_dst =
        (item_ct1.get_group(1) * OW + ix) * CHW +
        (item_ct1.get_group(0) * (KW * KH) + ky * KW + kx);

    if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
        dst[offset_dst] =
            sycl::vec<float, 1>(0.0f)
                .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
    } else {
        const int64_t offset_src = item_ct1.get_group(0) * offset_delta;
        dst[offset_dst] =
            sycl::vec<float, 1>(x[offset_src + iih * IW + iiw])
                .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
    }
}
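
// 2D pooling over NCHW data: one thread per output element. Note that average
// pooling divides by the full kh*kw window even when the window is clipped at
// the borders (count_include_pad behavior).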
template <typename Ti, typename To>
static void pool2d_nchw_kernel(
    const int ih, const int iw, const int oh, const int ow,
    const int kh, const int kw, const int sh, const int sw,
    const int ph, const int pw, const int parallel_elements,
    const Ti* src, To* dst, const enum ggml_op_pool op,
    const sycl::nd_item<3> &item_ct1) {
    int idx = item_ct1.get_local_id(2) +
              item_ct1.get_group(2) * item_ct1.get_local_range(2);
    if (idx >= parallel_elements) {
        return;
    }

    const int I_HW = ih * iw;
    const int O_HW = oh * ow;
    const int nc = idx / O_HW;
    const int cur_oh = idx % O_HW / ow;
    const int cur_ow = idx % O_HW % ow;
    const Ti* i_ptr = src + nc * I_HW;
    To* o_ptr = dst + nc * O_HW;
    const int start_h = cur_oh * sh - ph;
    const int bh = sycl::max(0, start_h);
    const int eh = sycl::min(ih, start_h + kh);
    const int start_w = cur_ow * sw - pw;
    const int bw = sycl::max(0, start_w);
    const int ew = sycl::min(iw, start_w + kw);

    To res = 0;

    switch (op) {
        case GGML_OP_POOL_AVG: res = 0;        break;
        case GGML_OP_POOL_MAX: res = -FLT_MAX; break;
    }

    for (int i = bh; i < eh; i += 1) {
        for (int j = bw; j < ew; j += 1) {
#if DPCT_COMPATIBILITY_TEMP >= 350
            /*
            DPCT1098:106: The '*' expression is used instead of the __ldg
            call. These two expressions do not provide the exact same
            functionality. Check the generated code for potential precision
            and/or performance issues.
            */
            Ti cur = *(i_ptr + i * iw + j);
#else
            Ti cur = i_ptr[i * iw + j];
#endif
            switch (op) {
                case GGML_OP_POOL_AVG: res += (cur / (kh * kw));      break;
                case GGML_OP_POOL_MAX: res = sycl::max(res, (To)cur); break;
            }
        }
    }

    o_ptr[cur_oh * ow + cur_ow] = res;
}
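
// Launch wrapper for k_get_rows: the grid covers (ne11*ne12, ne10,
// ceil(ne00 / (2*SYCL_GET_ROWS_BLOCK_SIZE))) because the kernel dequantizes
// two values per thread, hence the ne00 % 2 == 0 assertion below.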
template <int qk, int qr, dequantize_kernel_t dq>
static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                          ggml_tensor *dst, const void *src0_dd,
                          const int32_t *src1_dd, float *dst_dd,
                          queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    GGML_ASSERT(ne00 % 2 == 0);

    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1) {
                             k_get_rows<qk, qr, dq>(
                                 src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
                         });

    (void) dst;
}

template <typename src0_t>
static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const src0_t *src0_dd, const int32_t *src1_dd,
                                float *dst_dd, queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + SYCL_GET_ROWS_BLOCK_SIZE - 1) / SYCL_GET_ROWS_BLOCK_SIZE;
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) {
                k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
            });
    }

    (void) dst;
}
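
// Broadcasting binary op launcher: leading dimensions that are not broadcast
// are collapsed to enlarge the innermost extent, then either a 3D grid is
// launched or, when the z dimension would exceed 65535 blocks, a 1D fallback
// kernel that unravels the flat index.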
template<float (*bin_op)(const float, const float)>
struct bin_bcast_sycl {
    template <typename src0_t, typename src1_t, typename dst_t>
    void operator()(ggml_backend_sycl_context & ctx,
                    const struct ggml_tensor *src0,
                    const struct ggml_tensor *src1, struct ggml_tensor *dst,
                    const src0_t *src0_dd, const src1_t *src1_dd, dst_t *dst_dd,
                    queue_ptr stream) {

        GGML_TENSOR_BINARY_OP_LOCALS

        int nr0 = ne10/ne0;
        int nr1 = ne11/ne1;
        int nr2 = ne12/ne2;
        int nr3 = ne13/ne3;

        int nr[4] = { nr0, nr1, nr2, nr3 };

        // collapse dimensions until first broadcast dimension
        int64_t cne0[] = {ne0, ne1, ne2, ne3};
        int64_t cne1[] = {ne10, ne11, ne12, ne13};
        size_t cnb0[] = {nb0, nb1, nb2, nb3};
        size_t cnb1[] = {nb10, nb11, nb12, nb13};

        auto collapse = [](int64_t cne[]) {
            cne[0] *= cne[1];
            cne[1] = cne[2];
            cne[2] = cne[3];
            cne[3] = 1;
        };

        auto collapse_nb = [](size_t cnb[], int64_t cne[]) {
            cnb[1] *= cne[1];
            cnb[2] *= cne[2];
            cnb[3] *= cne[3];
        };

        for (int i = 0; i < 4; i++) {
            if (nr[i] != 1) {
                break;
            }
            if (i > 0) {
                collapse_nb(cnb0, cne0);
                collapse_nb(cnb1, cne1);
                collapse(cne0);
                collapse(cne1);
            }
        }

        {
            int64_t ne0 = cne0[0];
            int64_t ne1 = cne0[1];
            int64_t ne2 = cne0[2];
            int64_t ne3 = cne0[3];

            int64_t ne10 = cne1[0];
            int64_t ne11 = cne1[1];
            int64_t ne12 = cne1[2];
            int64_t ne13 = cne1[3];

            size_t nb0 = cnb0[0];
            size_t nb1 = cnb0[1];
            size_t nb2 = cnb0[2];
            size_t nb3 = cnb0[3];

            size_t nb10 = cnb1[0];
            size_t nb11 = cnb1[1];
            size_t nb12 = cnb1[2];
            size_t nb13 = cnb1[3];

            size_t s0 = nb0 / sizeof(dst_t);
            size_t s1 = nb1 / sizeof(dst_t);
            size_t s2 = nb2 / sizeof(dst_t);
            size_t s3 = nb3 / sizeof(dst_t);

            size_t s10 = nb10 / sizeof(src1_t);
            size_t s11 = nb11 / sizeof(src1_t);
            size_t s12 = nb12 / sizeof(src1_t);
            size_t s13 = nb13 / sizeof(src1_t);

            GGML_ASSERT(s0 == 1);
            GGML_ASSERT(s10 == 1);

            const int block_size = 128;

            int64_t hne0 = std::max(ne0/2LL, 1LL);

            sycl::range<3> block_dims(1, 1, 1);
            block_dims[2] = std::min<unsigned int>(hne0, block_size);
            block_dims[1] = std::min<unsigned int>(
                ne1, block_size / (unsigned int)block_dims[2]);
            block_dims[0] = std::min(
                std::min<unsigned int>(
                    ne2 * ne3, block_size / (unsigned int)block_dims[2] /
                                   (unsigned int)block_dims[1]),
                64U);

            sycl::range<3> block_nums(
                (ne2 * ne3 + block_dims[0] - 1) / block_dims[0],
                (ne1 + block_dims[1] - 1) / block_dims[1],
                (hne0 + block_dims[2] - 1) / block_dims[2]);

            if (block_nums[0] > 65535) {
                // this is the maximum number of blocks in z direction, fallback to 1D grid kernel
                int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size;
                {
                    dpct::has_capability_or_fail(stream->get_device(),
                                                 {sycl::aspect::fp16});

                    stream->parallel_for(
                        sycl::nd_range<3>(sycl::range<3>(1, 1, block_num) *
                                              sycl::range<3>(1, 1, block_size),
                                          sycl::range<3>(1, 1, block_size)),
                        [=](sycl::nd_item<3> item_ct1) {
                            k_bin_bcast_unravel<bin_op>(
                                src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3,
                                ne10, ne11, ne12, ne13, s1, s2, s3, s11, s12,
                                s13, item_ct1);
                        });
                }
            } else {
                /*
                DPCT1049:16: The work-group size passed to the SYCL kernel may
                exceed the limit. To get the device limit, query
                info::device::max_work_group_size. Adjust the work-group size if
                needed.
                */
                dpct::has_capability_or_fail(stream->get_device(),
                                             {sycl::aspect::fp16});

                stream->parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        k_bin_bcast<bin_op>(src0_dd, src1_dd, dst_dd, ne0, ne1,
                                            ne2, ne3, ne10, ne11, ne12, ne13,
                                            s1, s2, s3, s11, s12, s13,
                                            item_ct1);
                    });
            }
        }
    }
};
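
// The element-wise launchers below all follow the same pattern: compute the
// ceil-div block count for k elements and launch a 1D nd_range with the
// corresponding SYCL_*_BLOCK_SIZE work-group.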
static void acc_f32_sycl(const float *x, const float *y, float *dst,
                         const int n_elements, const int ne10, const int ne11,
                         const int ne12, const int nb1, const int nb2,
                         const int offset, queue_ptr stream) {
    int num_blocks = (n_elements + SYCL_ACC_BLOCK_SIZE - 1) / SYCL_ACC_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset,
                    item_ct1);
        });
}

static void gelu_f32_sycl(const float *x, float *dst, const int k,
                          queue_ptr stream) {
    const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            gelu_f32(x, dst, k, item_ct1);
        });
}

static void silu_f32_sycl(const float *x, float *dst, const int k,
                          queue_ptr stream) {
    const int num_blocks = (k + SYCL_SILU_BLOCK_SIZE - 1) / SYCL_SILU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            silu_f32(x, dst, k, item_ct1);
        });
}

static void gelu_quick_f32_sycl(const float *x, float *dst, const int k,
                                queue_ptr stream) {
    const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            gelu_quick_f32(x, dst, k, item_ct1);
        });
}

static void tanh_f32_sycl(const float *x, float *dst, const int k,
                          queue_ptr stream) {
    const int num_blocks = (k + SYCL_TANH_BLOCK_SIZE - 1) / SYCL_TANH_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            tanh_f32(x, dst, k, item_ct1);
        });
}

static void relu_f32_sycl(const float *x, float *dst, const int k,
                          queue_ptr stream) {
    const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            relu_f32(x, dst, k, item_ct1);
        });
}

static void hardsigmoid_f32_sycl(const float *x, float *dst, const int k,
                                 queue_ptr stream) {
    const int num_blocks = (k + SYCL_HARDSIGMOID_BLOCK_SIZE - 1) / SYCL_HARDSIGMOID_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            hardsigmoid_f32(x, dst, k, item_ct1);
        });
}

static void hardswish_f32_sycl(const float *x, float *dst, const int k,
                               queue_ptr stream) {
    const int num_blocks = (k + SYCL_HARDSWISH_BLOCK_SIZE - 1) / SYCL_HARDSWISH_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            hardswish_f32(x, dst, k, item_ct1);
        });
}

static void leaky_relu_f32_sycl(const float *x, float *dst, const int k,
                                const float negative_slope,
                                queue_ptr stream) {
    const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            leaky_relu_f32(x, dst, k, negative_slope, item_ct1);
        });
}

static void sqr_f32_sycl(const float *x, float *dst, const int k,
                         queue_ptr stream) {
    const int num_blocks = (k + SYCL_SQR_BLOCK_SIZE - 1) / SYCL_SQR_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            sqr_f32(x, dst, k, item_ct1);
        });
}
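
// Normalization launchers: rows with fewer than 1024 columns are reduced by a
// single sub-group of WARP_SIZE threads; wider rows use a full work-group of
// GROUP_SIZE threads plus the 32-slot local buffer for the cross-warp
// reduction.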
  5002. static void norm_f32_sycl(const float *x, float *dst, const int ncols,
  5003. const int nrows, const float eps,
  5004. queue_ptr stream) {
  5005. GGML_ASSERT(ncols % WARP_SIZE == 0);
  5006. if (ncols < 1024) {
  5007. const sycl::range<3> block_dims(1, 1, WARP_SIZE);
  5008. stream->submit([&](sycl::handler &cgh) {
  5009. sycl::local_accessor<sycl::float2, 1> s_sum_acc_ct1(
  5010. sycl::range<1>(32), cgh);
  5011. cgh.parallel_for(
  5012. sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
  5013. block_dims),
  5014. [=](sycl::nd_item<3> item_ct1)
  5015. [[intel::reqd_sub_group_size(32)]] {
  5016. norm_f32(x, dst, ncols, eps, item_ct1,
  5017. s_sum_acc_ct1.get_pointer(), WARP_SIZE);
  5018. });
  5019. });
  5020. } else {
  5021. // FIXME: 1024 from cuda
  5022. const int work_group_size = GROUP_SIZE;
  5023. const sycl::range<3> block_dims(1, 1, work_group_size);
  5024. /*
  5025. DPCT1049:17: The work-group size passed to the SYCL kernel may exceed
  5026. the limit. To get the device limit, query
  5027. info::device::max_work_group_size. Adjust the work-group size if needed.
  5028. */
  5029. stream->submit([&](sycl::handler &cgh) {
  5030. sycl::local_accessor<sycl::float2, 1> s_sum_acc_ct1(
  5031. sycl::range<1>(32), cgh);
  5032. cgh.parallel_for(
  5033. sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
  5034. block_dims),
  5035. [=](sycl::nd_item<3> item_ct1)
  5036. [[intel::reqd_sub_group_size(32)]] {
  5037. norm_f32(x, dst, ncols, eps, item_ct1,
  5038. s_sum_acc_ct1.get_pointer(), work_group_size);
  5039. });
  5040. });
  5041. }
  5042. }
  5043. static void group_norm_f32_sycl(const float *x, float *dst,
  5044. const int num_groups, const int group_size,
  5045. const int ne_elements, queue_ptr stream) {
  5046. static const float eps = 1e-6f;
  5047. if (group_size < 1024) {
  5048. const sycl::range<3> block_dims(1, 1, WARP_SIZE);
  5049. stream->submit([&](sycl::handler &cgh) {
  5050. sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
  5051. cgh);
  5052. const float eps_ct4 = eps;
  5053. cgh.parallel_for(
  5054. sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
  5055. block_dims),
  5056. [=](sycl::nd_item<3> item_ct1)
  5057. [[intel::reqd_sub_group_size(32)]] {
  5058. group_norm_f32(
  5059. x, dst, group_size, ne_elements, eps_ct4, item_ct1,
  5060. s_sum_acc_ct1.get_pointer(), WARP_SIZE);
  5061. });
  5062. });
  5063. } else {
  5064. const int work_group_size = GROUP_SIZE;
  5065. const sycl::range<3> block_dims(1, 1, work_group_size);
  5066. /*
  5067. DPCT1049:18: The work-group size passed to the SYCL kernel may exceed
  5068. the limit. To get the device limit, query
  5069. info::device::max_work_group_size. Adjust the work-group size if needed.
  5070. */
  5071. stream->submit([&](sycl::handler &cgh) {
  5072. sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
  5073. cgh);
  5074. const float eps_ct4 = eps;
  5075. cgh.parallel_for(
  5076. sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
  5077. block_dims),
  5078. [=](sycl::nd_item<3> item_ct1)
  5079. [[intel::reqd_sub_group_size(32)]] {
  5080. group_norm_f32(x, dst, group_size, ne_elements,
  5081. eps_ct4, item_ct1,
  5082. s_sum_acc_ct1.get_pointer(), work_group_size);
  5083. });
  5084. });
  5085. }
  5086. }
  5087. static void concat_f32_sycl(const float *x, const float *y, float *dst,
  5088. const int ne0, int ne1, int ne2, int ne02,
  5089. queue_ptr stream) {
  5090. int num_blocks = (ne0 + SYCL_CONCAT_BLOCK_SIZE - 1) / SYCL_CONCAT_BLOCK_SIZE;
  5091. sycl::range<3> gridDim(ne2, ne1, num_blocks);
  5092. stream->parallel_for(
  5093. sycl::nd_range<3>(gridDim *
  5094. sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
  5095. sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
  5096. [=](sycl::nd_item<3> item_ct1) {
  5097. concat_f32(x, y, dst, ne0, ne02, item_ct1);
  5098. });
  5099. }
static void upscale_f32_sycl(const float *x, float *dst, const int nb00, const int nb01,
                             const int nb02, const int nb03, const int ne10, const int ne11,
                             const int ne12, const int ne13, const float sf0, const float sf1,
                             const float sf2, const float sf3, queue_ptr stream) {
    int dst_size = ne10 * ne11 * ne12 * ne13;
    int num_blocks = (dst_size + SYCL_UPSCALE_BLOCK_SIZE - 1) / SYCL_UPSCALE_BLOCK_SIZE;
    sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE);
    stream->parallel_for(
        sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)),
        [=](sycl::nd_item<1> item_ct1) {
            upscale_f32(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1);
        });
}
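// pad_f32_sycl: the grid spans the padded output extents (ne0, ne1, ne2);
// the kernel receives the source extents ne00..ne02 and presumably writes
// zeros outside them (ggml's PAD op is a zero pad).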
static void pad_f32_sycl(const float *x, float *dst, const int ne00,
                         const int ne01, const int ne02, const int ne0,
                         const int ne1, const int ne2, queue_ptr stream) {
    int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE;
    sycl::range<3> gridDim(ne2, ne1, num_blocks);
    stream->parallel_for(
        sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            pad_f32(x, dst, ne0, ne00, ne01, ne02, item_ct1);
        });
}
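// rms_norm_f32_sycl: same two-path dispatch as norm_f32_sycl, one work-group
// per row. ncols must be a multiple of WARP_SIZE; the 32-element local
// accessor holds one partial sum per warp for the cross-warp reduction.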
static void rms_norm_f32_sycl(const float *x, float *dst, const int ncols,
                              const int nrows, const float eps,
                              queue_ptr stream) {
    GGML_ASSERT(ncols % WARP_SIZE == 0);
    // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
    if (ncols < 1024) {
        const sycl::range<3> block_dims(1, 1, WARP_SIZE);
        stream->submit([&](sycl::handler &cgh) {
            sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
                                                         cgh);
            cgh.parallel_for(
                sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
                                  block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        rms_norm_f32(x, dst, ncols, eps, item_ct1,
                                     s_sum_acc_ct1.get_pointer(), WARP_SIZE);
                    });
        });
    } else {
        const int work_group_size = GROUP_SIZE;
        const sycl::range<3> block_dims(1, 1, work_group_size);
        /*
        DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        stream->submit([&](sycl::handler &cgh) {
            sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(32),
                                                         cgh);
            cgh.parallel_for(
                sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
                                  block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        rms_norm_f32(x, dst, ncols, eps, item_ct1,
                                     s_sum_acc_ct1.get_pointer(),
                                     work_group_size);
                    });
        });
    }
}
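// quantize_row_q8_1_sycl: quantizes activations to q8_1 on the device, one
// grid row per ky and ceil(kx_padded / SYCL_QUANTIZE_BLOCK_SIZE) blocks
// along x. The fp16 capability check is needed because the q8_1 block stores
// its scale/sum in half precision.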
static void quantize_row_q8_1_sycl(const float *x, void *vy, const int kx,
                                   const int ky, const int kx_padded,
                                   queue_ptr stream) {
    const int block_num_x = (kx_padded + SYCL_QUANTIZE_BLOCK_SIZE - 1) / SYCL_QUANTIZE_BLOCK_SIZE;
    const sycl::range<3> num_blocks(1, ky, block_num_x);
    // the work-group size must match the divisor used for block_num_x above
    const sycl::range<3> block_size(1, 1, SYCL_QUANTIZE_BLOCK_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(num_blocks * block_size, block_size),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                quantize_q8_1(x, vy, kx, kx_padded, item_ct1);
            });
    }
}
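// dequantize_block_sycl: generic launcher for the simple quant formats.
// Each work-item writes two output values, so one block covers
// 2 * SYCL_DEQUANTIZE_BLOCK_SIZE elements of k, hence the ceil-division by
// that product when computing num_blocks.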
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void dequantize_block_sycl(const void *__restrict__ vx,
                                  dst_t *__restrict__ y, const int k,
                                  queue_ptr stream) {
    const int num_blocks = (k + 2*SYCL_DEQUANTIZE_BLOCK_SIZE - 1) / (2*SYCL_DEQUANTIZE_BLOCK_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(
                sycl::range<3>(1, 1, num_blocks) *
                    sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE),
                sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                dequantize_block<qk, qr, dequantize_kernel>(vx, y, k, item_ct1);
            });
    }
}
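// The dequantize_row_*_sycl helpers below launch one work-group per
// QK_K-value superblock (nb = k / QK_K), with 32 or 64 work-items depending
// on how the corresponding dequantize_block_* kernel slices the superblock.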
template <typename dst_t>
static void dequantize_row_q2_K_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 64),
                                               sycl::range<3>(1, 1, 64)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q2_K(vx, y, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_q3_K_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 64),
                                               sycl::range<3>(1, 1, 64)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q3_K(vx, y, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_q4_0_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb32 = k / 32;
    const int nb = (k + 255) / 256;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q4_0(vx, y, nb32, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_q4_1_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb32 = k / 32;
    const int nb = (k + 255) / 256;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q4_1(vx, y, nb32, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q4_K(vx, y, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_q5_K_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 64),
                                               sycl::range<3>(1, 1, 64)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q5_K(vx, y, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int k,
                                     queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 64),
                                               sycl::range<3>(1, 1, 64)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_q6_K(vx, y, item_ct1);
                             });
    }
}

template <typename dst_t>
static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int k,
                                      queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq1_s(
                                     vx, y, item_ct1, iq1s_grid_gpu);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq1_m_sycl(const void *vx, dst_t *y, const int k,
                                      queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq1_m(
                                     vx, y, item_ct1, iq1s_grid_gpu);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq2_xxs_sycl(const void *vx, dst_t *y, const int k,
                                        queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq2_xxs(
                                     vx, y, item_ct1, iq2xxs_grid,
                                     ksigns_iq2xs, kmask_iq2xs);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq2_xs_sycl(const void *vx, dst_t *y, const int k,
                                       queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq2_xs(
                                     vx, y, item_ct1, iq2xs_grid,
                                     ksigns_iq2xs, kmask_iq2xs);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq2_s_sycl(const void *vx, dst_t *y, const int k,
                                      queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq2_s(vx, y, item_ct1);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq3_xxs_sycl(const void *vx, dst_t *y, const int k,
                                        queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq3_xxs(
                                     vx, y, item_ct1, iq3xxs_grid,
                                     ksigns_iq2xs, kmask_iq2xs);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq3_s_sycl(const void *vx, dst_t *y, const int k,
                                      queue_ptr stream) {
    const int nb = k / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                   sycl::range<3>(1, 1, 32),
                                               sycl::range<3>(1, 1, 32)),
                             [=](sycl::nd_item<3> item_ct1) {
                                 dequantize_block_iq3_s(
                                     vx, y, item_ct1, kmask_iq2xs, iq3s_grid);
                             });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq4_xs_sycl(const void *vx, dst_t *y, const int k,
                                       queue_ptr stream) {
    const int nb = (k + QK_K - 1) / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                      sycl::range<3>(1, 1, 32),
                                  sycl::range<3>(1, 1, 32)),
                [=](sycl::nd_item<3> item_ct1) {
                    dequantize_block_iq4_xs(vx, y, item_ct1);
                });
        });
    }
}

template <typename dst_t>
static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int k,
                                       queue_ptr stream) {
    const int nb = (k + QK_K - 1) / QK_K;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                      sycl::range<3>(1, 1, 32),
                                  sycl::range<3>(1, 1, 32)),
                [=](sycl::nd_item<3> item_ct1) {
                    dequantize_block_iq4_nl(vx, y, item_ct1);
                });
        });
    }
}

template <typename src_t, typename dst_t>
static void convert_unary_sycl(const void *__restrict__ vx,
                               dst_t *__restrict__ y, const int k,
                               queue_ptr stream) {
    const int num_blocks = (k + SYCL_DEQUANTIZE_BLOCK_SIZE - 1) / SYCL_DEQUANTIZE_BLOCK_SIZE;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(
                sycl::range<3>(1, 1, num_blocks) *
                    sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE),
                sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                convert_unary<src_t>(vx, y, k, item_ct1);
            });
    }
}
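// ggml_get_to_fp16_sycl: dispatch table from ggml_type to the matching
// dequantize/convert launcher with sycl::half output; returns nullptr for
// unsupported types, which callers must check. A minimal caller sketch
// (hypothetical variable names, for illustration only):
//
//   const to_fp16_sycl_t to_fp16 = ggml_get_to_fp16_sycl(src->type);
//   GGML_ASSERT(to_fp16 != nullptr);
//   to_fp16(src_data, dst_f16, nelements, stream);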
static to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type) try {
    switch (type) {
        case GGML_TYPE_Q4_0:
            return dequantize_block_sycl<QK4_0, QR4_0, dequantize_q4_0>;
        case GGML_TYPE_Q4_1:
            return dequantize_block_sycl<QK4_1, QR4_1, dequantize_q4_1>;
        case GGML_TYPE_Q5_0:
            return dequantize_block_sycl<QK5_0, QR5_0, dequantize_q5_0>;
        case GGML_TYPE_Q5_1:
            return dequantize_block_sycl<QK5_1, QR5_1, dequantize_q5_1>;
        case GGML_TYPE_Q8_0:
            return dequantize_block_sycl<QK8_0, QR8_0, dequantize_q8_0>;
        case GGML_TYPE_Q2_K:
            return dequantize_row_q2_K_sycl;
        case GGML_TYPE_Q3_K:
            return dequantize_row_q3_K_sycl;
        case GGML_TYPE_Q4_K:
            return dequantize_row_q4_K_sycl;
        case GGML_TYPE_Q5_K:
            return dequantize_row_q5_K_sycl;
        case GGML_TYPE_Q6_K:
            return dequantize_row_q6_K_sycl;
        case GGML_TYPE_IQ1_S:
            return dequantize_row_iq1_s_sycl;
        case GGML_TYPE_IQ1_M:
            return dequantize_row_iq1_m_sycl;
        case GGML_TYPE_IQ2_XXS:
            return dequantize_row_iq2_xxs_sycl;
        case GGML_TYPE_IQ2_XS:
            return dequantize_row_iq2_xs_sycl;
        case GGML_TYPE_IQ2_S:
            return dequantize_row_iq2_s_sycl;
        case GGML_TYPE_IQ3_XXS:
            return dequantize_row_iq3_xxs_sycl;
        case GGML_TYPE_IQ3_S:
            return dequantize_row_iq3_s_sycl;
        case GGML_TYPE_IQ4_XS:
            return dequantize_row_iq4_xs_sycl;
        case GGML_TYPE_IQ4_NL:
            return dequantize_row_iq4_nl_sycl;
        case GGML_TYPE_F32:
            return convert_unary_sycl<float>;
        default:
            return nullptr;
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
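// ggml_get_to_fp32_sycl: f32 counterpart of the table above. Note the
// asymmetry: q4_0/q4_1 use the superblock-based dequantize_row_* kernels
// here while q5_0..q8_0 still go through the generic dequantize_block_sycl
// template, and the identity conversion is f16 -> f32 instead of f32 -> f16.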
static to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
            return dequantize_row_q4_0_sycl;
        case GGML_TYPE_Q4_1:
            return dequantize_row_q4_1_sycl;
        case GGML_TYPE_Q5_0:
            return dequantize_block_sycl<QK5_0, QR5_0, dequantize_q5_0>;
        case GGML_TYPE_Q5_1:
            return dequantize_block_sycl<QK5_1, QR5_1, dequantize_q5_1>;
        case GGML_TYPE_Q8_0:
            return dequantize_block_sycl<QK8_0, QR8_0, dequantize_q8_0>;
        case GGML_TYPE_Q2_K:
            return dequantize_row_q2_K_sycl;
        case GGML_TYPE_Q3_K:
            return dequantize_row_q3_K_sycl;
        case GGML_TYPE_Q4_K:
            return dequantize_row_q4_K_sycl;
        case GGML_TYPE_Q5_K:
            return dequantize_row_q5_K_sycl;
        case GGML_TYPE_Q6_K:
            return dequantize_row_q6_K_sycl;
        case GGML_TYPE_IQ1_S:
            return dequantize_row_iq1_s_sycl;
        case GGML_TYPE_IQ1_M:
            return dequantize_row_iq1_m_sycl;
        case GGML_TYPE_IQ2_XXS:
            return dequantize_row_iq2_xxs_sycl;
        case GGML_TYPE_IQ2_XS:
            return dequantize_row_iq2_xs_sycl;
        case GGML_TYPE_IQ2_S:
            return dequantize_row_iq2_s_sycl;
        case GGML_TYPE_IQ3_XXS:
            return dequantize_row_iq3_xxs_sycl;
        case GGML_TYPE_IQ3_S:
            return dequantize_row_iq3_s_sycl;
        case GGML_TYPE_IQ4_XS:
            return dequantize_row_iq4_xs_sycl;
        case GGML_TYPE_IQ4_NL:
            return dequantize_row_iq4_nl_sycl;
        case GGML_TYPE_F16:
            return convert_unary_sycl<sycl::half>;
        default:
            return nullptr;
    }
}
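// dequantize_mul_mat_vec_*_sycl: launchers for the dequantize-and-multiply
// matrix-vector kernels. Each work-group is WARP_SIZE x GGML_SYCL_MMV_Y
// work-items and covers GGML_SYCL_MMV_Y rows, one warp per row, so the grid
// needs ceil(nrows / GGML_SYCL_MMV_Y) groups; e.g. with GGML_SYCL_MMV_Y == 1,
// nrows == 4096 launches 4096 groups.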
static void dequantize_mul_mat_vec_q4_0_sycl(const void *vx, const dfloat *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(
                    vx, y, dst, ncols, nrows, item_ct1);
            });
    }
}

static void dequantize_mul_mat_vec_q4_1_sycl(const void *vx, const dfloat *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(
                    vx, y, dst, ncols, nrows, item_ct1);
            });
    }
}

static void dequantize_mul_mat_vec_q5_0_sycl(const void *vx, const dfloat *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(
                    vx, y, dst, ncols, nrows, item_ct1);
            });
    }
}

static void dequantize_mul_mat_vec_q5_1_sycl(const void *vx, const dfloat *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(
                    vx, y, dst, ncols, nrows, item_ct1);
            });
    }
}

static void dequantize_mul_mat_vec_q8_0_sycl(const void *vx, const dfloat *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(
                    vx, y, dst, ncols, nrows, item_ct1);
            });
    }
}

static void dequantize_mul_mat_vec_q2_K_sycl(const void *vx, const float *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
    const int block_num_y = (nrows + ny - 1) / ny;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, ny, 32);
    stream->parallel_for(
        sycl::nd_range<3>(block_nums * block_dims, block_dims),
        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
            dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1);
        });
}

static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION;
    const int block_num_y = (nrows + ny - 1) / ny;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, ny, 32);
    stream->parallel_for(
        sycl::nd_range<3>(block_nums * block_dims, block_dims),
        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
            dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1);
        });
}

static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION;
    const int block_num_y = (nrows + ny - 1) / ny;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, ny, 32);
    stream->parallel_for(
        sycl::nd_range<3>(block_nums * block_dims, block_dims),
        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
            dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1);
        });
}

static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const sycl::range<3> block_dims(1, 1, 32);
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims),
        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
            dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1);
        });
}

static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y,
                                             float *dst, const int ncols,
                                             const int nrows,
                                             queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION;
    const int block_num_y = (nrows + ny - 1) / ny;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, ny, 32);
    stream->parallel_for(
        sycl::nd_range<3>(block_nums * block_dims, block_dims),
        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
            dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1);
        });
}

static void convert_mul_mat_vec_f16_sycl(const void *vx, const dfloat *y,
                                         float *dst, const int ncols,
                                         const int nrows,
                                         queue_ptr stream) {
    GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols,
                                                          nrows, item_ct1);
            });
    }
}
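// mul_mat_vec_*_q8_1_sycl: MMVQ launchers, used when the activations have
// been quantized to q8_1 (see quantize_row_q8_1_sycl above). The template
// arguments pin down the weight block (QK/QI constants and block struct),
// the per-iteration vec-dot width (VDR_*_MMVQ), and the dot kernel itself.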
static void mul_mat_vec_q4_0_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK4_0 == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK4_0, QI4_0, block_q4_0,
                                      VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}
static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK4_1 == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK4_1, QI4_1, block_q4_1,
                                      VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}
static void mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK5_0 == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK5_0, QI5_0, block_q5_0,
                                      VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q5_1_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK5_1 == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK5_1, QI5_1, block_q5_1,
                                      VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q8_0_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK8_0 == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK8_0, QI8_0, block_q8_0,
                                      VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q2_K_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK_K, QI2_K, block_q2_K,
                                      VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q3_K_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK_K, QI3_K, block_q3_K,
                                      VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q4_K_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK_K, QI4_K, block_q4_K,
                                      VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK_K, QI5_K, block_q5_K,
                                      VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy,
                                       float *dst, const int ncols,
                                       const int nrows,
                                       queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q<QK_K, QI6_K, block_q6_K,
                                      VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq2_xxs_q8_1_sycl(const void *vx, const void *vy,
                                          float *dst, const int ncols,
                                          const int nrows,
                                          queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq2_xxs_q8_1<QK_K, QI2_XXS, block_iq2_xxs, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}
static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy,
                                         float *dst, const int ncols,
                                         const int nrows,
                                         queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq2_xs_q8_1<QK_K, QI2_XS, block_iq2_xs, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols,
                                        const int nrows,
                                        queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq2_s_q8_1<QK_K, QI2_S, block_iq2_s, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy,
                                          float *dst, const int ncols,
                                          const int nrows,
                                          queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq3_xxs_q8_1<QK_K, QI3_XXS, block_iq3_xxs, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols,
                                        const int nrows,
                                        queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq3_s_q8_1<QK_K, QI3_XS, block_iq3_s, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols,
                                        const int nrows,
                                        queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq1_s_q8_1<QK_K, QI1_S, block_iq1_s, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}
static void mul_mat_vec_iq1_m_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols,
                                        const int nrows,
                                        queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq1_m_q8_1<QK_K, QI1_S, block_iq1_m, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq4_nl_q8_1_sycl(const void *vx, const void *vy,
                                         float *dst, const int ncols,
                                         const int nrows,
                                         queue_ptr stream) {
    GGML_ASSERT(ncols % QK4_NL == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq4_nl_q8_1<QK4_NL, QI4_NL, block_iq4_nl, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}

static void mul_mat_vec_iq4_xs_q8_1_sycl(const void *vx, const void *vy,
                                         float *dst, const int ncols,
                                         const int nrows,
                                         queue_ptr stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
    const sycl::range<3> block_nums(1, 1, block_num_y);
    const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
    {
        stream->submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(32)]] {
                        mul_mat_vec_q_iq4_xs_q8_1<QK_K, QI4_XS, block_iq4_xs, 1>(
                            vx, vy, dst, ncols, nrows, item_ct1);
                    });
        });
    }
}
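// ggml_mul_mat_q*_q8_1_sycl: tiled quantized matmul (MMQ). Tile sizes
// (mmq_x, mmq_y) and warp counts are picked from the device's version tag;
// the RDNA*/AMPERE/PASCAL constant names are inherited from the CUDA port
// this file was migrated from. When nrows_x is not a multiple of mmq_y the
// need_check variant is used so the kernel bounds-checks the ragged tile
// rows. The local accessors stage the x/y tiles in shared local memory,
// e.g. a (hypothetical) mmq_y of 64 with WARP_SIZE 32 gives a
// 64 * 32 + 64 = 2112-int x-tile for q4_0.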
static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {
    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q4_0_RDNA2;
        mmq_y = MMQ_Y_Q4_0_RDNA2;
        nwarps = NWARPS_Q4_0_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q4_0_RDNA1;
        mmq_y = MMQ_Y_Q4_0_RDNA1;
        nwarps = NWARPS_Q4_0_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q4_0_AMPERE;
        mmq_y = MMQ_Y_Q4_0_AMPERE;
        nwarps = NWARPS_Q4_0_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q4_0_PASCAL;
        mmq_y = MMQ_Y_Q4_0_PASCAL;
        nwarps = NWARPS_Q4_0_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:20: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_qs_q4_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<float, 1> tile_x_d_q4_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI4_0) + mmq_y / QI4_0),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q4_0<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_qs_q4_0_acc_ct1.get_pointer(),
                            tile_x_d_q4_0_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:21: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_qs_q4_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<float, 1> tile_x_d_q4_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI4_0) + mmq_y / QI4_0),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q4_0<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_qs_q4_0_acc_ct1.get_pointer(),
                            tile_x_d_q4_0_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {
    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q4_1_RDNA2;
        mmq_y = MMQ_Y_Q4_1_RDNA2;
        nwarps = NWARPS_Q4_1_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q4_1_RDNA1;
        mmq_y = MMQ_Y_Q4_1_RDNA1;
        nwarps = NWARPS_Q4_1_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q4_1_AMPERE;
        mmq_y = MMQ_Y_Q4_1_AMPERE;
        nwarps = NWARPS_Q4_1_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q4_1_PASCAL;
        mmq_y = MMQ_Y_Q4_1_PASCAL;
        nwarps = NWARPS_Q4_1_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:22: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_qs_q4_1_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_1_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI4_1) + mmq_y / QI4_1),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q4_1<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_qs_q4_1_acc_ct1.get_pointer(),
                            tile_x_dm_q4_1_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:23: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_qs_q4_1_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_1_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI4_1) + mmq_y / QI4_1),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q4_1<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_qs_q4_1_acc_ct1.get_pointer(),
                            tile_x_dm_q4_1_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {
    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q5_0_RDNA2;
        mmq_y = MMQ_Y_Q5_0_RDNA2;
        nwarps = NWARPS_Q5_0_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q5_0_RDNA1;
        mmq_y = MMQ_Y_Q5_0_RDNA1;
        nwarps = NWARPS_Q5_0_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q5_0_AMPERE;
        mmq_y = MMQ_Y_Q5_0_AMPERE;
        nwarps = NWARPS_Q5_0_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q5_0_PASCAL;
        mmq_y = MMQ_Y_Q5_0_PASCAL;
        nwarps = NWARPS_Q5_0_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:24: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q5_0_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<float, 1> tile_x_d_q5_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI5_0) + mmq_y / QI5_0),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q5_0<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q5_0_acc_ct1.get_pointer(),
                            tile_x_d_q5_0_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:25: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q5_0_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<float, 1> tile_x_d_q5_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI5_0) + mmq_y / QI5_0),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q5_0<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q5_0_acc_ct1.get_pointer(),
                            tile_x_d_q5_0_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {
    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q5_1_RDNA2;
        mmq_y = MMQ_Y_Q5_1_RDNA2;
        nwarps = NWARPS_Q5_1_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q5_1_RDNA1;
        mmq_y = MMQ_Y_Q5_1_RDNA1;
        nwarps = NWARPS_Q5_1_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q5_1_AMPERE;
        mmq_y = MMQ_Y_Q5_1_AMPERE;
        nwarps = NWARPS_Q5_1_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q5_1_PASCAL;
        mmq_y = MMQ_Y_Q5_1_PASCAL;
        nwarps = NWARPS_Q5_1_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:26: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q5_1_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_1_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI5_1) + mmq_y / QI5_1),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q5_1<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q5_1_acc_ct1.get_pointer(),
                            tile_x_dm_q5_1_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:27: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});
            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q5_1_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_1_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI5_1) + mmq_y / QI5_1),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q5_1<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q5_1_acc_ct1.get_pointer(),
                            tile_x_dm_q5_1_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q8_0_RDNA2;
        mmq_y = MMQ_Y_Q8_0_RDNA2;
        nwarps = NWARPS_Q8_0_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q8_0_RDNA1;
        mmq_y = MMQ_Y_Q8_0_RDNA1;
        nwarps = NWARPS_Q8_0_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q8_0_AMPERE;
        mmq_y = MMQ_Y_Q8_0_AMPERE;
        nwarps = NWARPS_Q8_0_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q8_0_PASCAL;
        mmq_y = MMQ_Y_Q8_0_PASCAL;
        nwarps = NWARPS_Q8_0_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:28: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_qs_q8_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<float, 1> tile_x_d_q8_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI8_0) + mmq_y / QI8_0),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q8_0<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_qs_q8_0_acc_ct1.get_pointer(),
                            tile_x_d_q8_0_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:29: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_qs_q8_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<float, 1> tile_x_d_q8_0_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI8_0) + mmq_y / QI8_0),
                    cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q8_0<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_qs_q8_0_acc_ct1.get_pointer(),
                            tile_x_d_q8_0_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

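// The k-quant launchers (q2_K .. q6_K) differ from the 4/5/8-bit ones mainly
// in their local-memory layout: besides the quant tile and the per-block
// scale tile, they also stage a separate scales tile (tile_x_sc_*), sized by
// the sub-block scale granularity of the respective format.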
static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q2_K_RDNA2;
        mmq_y = MMQ_Y_Q2_K_RDNA2;
        nwarps = NWARPS_Q2_K_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q2_K_RDNA1;
        mmq_y = MMQ_Y_Q2_K_RDNA1;
        nwarps = NWARPS_Q2_K_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q2_K_AMPERE;
        mmq_y = MMQ_Y_Q2_K_AMPERE;
        nwarps = NWARPS_Q2_K_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q2_K_PASCAL;
        mmq_y = MMQ_Y_Q2_K_PASCAL;
        nwarps = NWARPS_Q2_K_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:30: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q2_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q2_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI2_K) + mmq_y / QI2_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q2_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q2_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q2_K_acc_ct1.get_pointer(),
                            tile_x_dm_q2_K_acc_ct1.get_pointer(),
                            tile_x_sc_q2_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:31: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q2_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q2_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI2_K) + mmq_y / QI2_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q2_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q2_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q2_K_acc_ct1.get_pointer(),
                            tile_x_dm_q2_K_acc_ct1.get_pointer(),
                            tile_x_sc_q2_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

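// q3_K additionally stages a high-bit tile (tile_x_qh_*): its 3-bit quants
// are stored as two low bits plus a separately packed high bit per value.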
static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q3_K_RDNA2;
        mmq_y = MMQ_Y_Q3_K_RDNA2;
        nwarps = NWARPS_Q3_K_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q3_K_RDNA1;
        mmq_y = MMQ_Y_Q3_K_RDNA1;
        nwarps = NWARPS_Q3_K_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q3_K_AMPERE;
        mmq_y = MMQ_Y_Q3_K_AMPERE;
        nwarps = NWARPS_Q3_K_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q3_K_PASCAL;
        mmq_y = MMQ_Y_Q3_K_PASCAL;
        nwarps = NWARPS_Q3_K_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:32: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI3_K) + mmq_y / QI3_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_qh_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 2) + mmq_y / 2), cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q3_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q3_K_acc_ct1.get_pointer(),
                            tile_x_dm_q3_K_acc_ct1.get_pointer(),
                            tile_x_qh_q3_K_acc_ct1.get_pointer(),
                            tile_x_sc_q3_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:33: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI3_K) + mmq_y / QI3_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_qh_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 2) + mmq_y / 2), cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q3_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q3_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q3_K_acc_ct1.get_pointer(),
                            tile_x_dm_q3_K_acc_ct1.get_pointer(),
                            tile_x_qh_q3_K_acc_ct1.get_pointer(),
                            tile_x_sc_q3_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q4_K_RDNA2;
        mmq_y = MMQ_Y_Q4_K_RDNA2;
        nwarps = NWARPS_Q4_K_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q4_K_RDNA1;
        mmq_y = MMQ_Y_Q4_K_RDNA1;
        nwarps = NWARPS_Q4_K_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q4_K_AMPERE;
        mmq_y = MMQ_Y_Q4_K_AMPERE;
        nwarps = NWARPS_Q4_K_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q4_K_PASCAL;
        mmq_y = MMQ_Y_Q4_K_PASCAL;
        nwarps = NWARPS_Q4_K_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:34: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q4_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI4_K) + mmq_y / QI4_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q4_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q4_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q4_K_acc_ct1.get_pointer(),
                            tile_x_dm_q4_K_acc_ct1.get_pointer(),
                            tile_x_sc_q4_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:35: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q4_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI4_K) + mmq_y / QI4_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q4_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q4_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q4_K_acc_ct1.get_pointer(),
                            tile_x_dm_q4_K_acc_ct1.get_pointer(),
                            tile_x_sc_q4_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q5_K_RDNA2;
        mmq_y = MMQ_Y_Q5_K_RDNA2;
        nwarps = NWARPS_Q5_K_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q5_K_RDNA1;
        mmq_y = MMQ_Y_Q5_K_RDNA1;
        nwarps = NWARPS_Q5_K_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q5_K_AMPERE;
        mmq_y = MMQ_Y_Q5_K_AMPERE;
        nwarps = NWARPS_Q5_K_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q5_K_PASCAL;
        mmq_y = MMQ_Y_Q5_K_PASCAL;
        nwarps = NWARPS_Q5_K_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:36: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q5_K_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI5_K) + mmq_y / QI5_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q5_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q5_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q5_K_acc_ct1.get_pointer(),
                            tile_x_dm_q5_K_acc_ct1.get_pointer(),
                            tile_x_sc_q5_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:37: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_q5_K_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI5_K) + mmq_y / QI5_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_q5_K_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q5_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_q5_K_acc_ct1.get_pointer(),
                            tile_x_dm_q5_K_acc_ct1.get_pointer(),
                            tile_x_sc_q5_K_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy,
                                        float *dst, const int ncols_x,
                                        const int nrows_x, const int ncols_y,
                                        const int nrows_y, const int nrows_dst,
                                        queue_ptr stream) try {

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
    const int compute_capability = ggml_sycl_info().devices[id].cc;

    int mmq_x, mmq_y, nwarps;
    if (compute_capability >= VER_GEN13) {
        mmq_x = MMQ_X_Q6_K_RDNA2;
        mmq_y = MMQ_Y_Q6_K_RDNA2;
        nwarps = NWARPS_Q6_K_RDNA2;
    } else if (compute_capability >= VER_GEN12) {
        mmq_x = MMQ_X_Q6_K_RDNA1;
        mmq_y = MMQ_Y_Q6_K_RDNA1;
        nwarps = NWARPS_Q6_K_RDNA1;
    } else if (compute_capability >= VER_GEN9) {
        mmq_x = MMQ_X_Q6_K_AMPERE;
        mmq_y = MMQ_Y_Q6_K_AMPERE;
        nwarps = NWARPS_Q6_K_AMPERE;
    } else if (compute_capability >= VER_4VEC) {
        mmq_x = MMQ_X_Q6_K_PASCAL;
        mmq_y = MMQ_Y_Q6_K_PASCAL;
        nwarps = NWARPS_Q6_K_PASCAL;
    } else {
        GGML_ASSERT(false);
    }

    const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
    const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
    const sycl::range<3> block_nums(1, block_num_y, block_num_x);
    const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);

    if (nrows_x % mmq_y == 0) {
        const bool need_check = false;
        /*
        DPCT1049:38: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI6_K) + mmq_y / QI6_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q6_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_acc_ct1.get_pointer(),
                            tile_x_dm_acc_ct1.get_pointer(),
                            tile_x_sc_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    } else {
        const bool need_check = true;
        /*
        DPCT1049:39: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(stream->get_device(),
                                         {sycl::aspect::fp16});

            stream->submit([&](sycl::handler &cgh) {
                sycl::local_accessor<int, 1> tile_x_ql_acc_ct1(
                    sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_x_dm_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / QI6_K) + mmq_y / QI6_K),
                    cgh);
                sycl::local_accessor<int, 1> tile_x_sc_acc_ct1(
                    sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
                sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE), cgh);
                sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
                    sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(block_nums * block_dims, block_dims),
                    [=](sycl::nd_item<3> item_ct1) {
                        mul_mat_q6_K<need_check>(
                            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                            nrows_dst, item_ct1,
                            tile_x_ql_acc_ct1.get_pointer(),
                            tile_x_dm_acc_ct1.get_pointer(),
                            tile_x_sc_acc_ct1.get_pointer(),
                            tile_y_qs_acc_ct1.get_pointer(),
                            tile_y_ds_acc_ct1.get_pointer());
                    });
            });
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

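// The two f16 x f32 mat-vec launchers below use one WARP_SIZE-wide work-group
// per (output row, channel) pair and request a sub-group size of 32, which
// the kernels assume for their warp-level reductions.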
static void ggml_mul_mat_p021_f16_f32_sycl(const void *vx, const float *y,
                                           float *dst, const int ncols_x,
                                           const int nrows_x,
                                           const int nchannels_x,
                                           const int nchannels_y,
                                           queue_ptr stream) {

    const sycl::range<3> block_nums(nchannels_y, nrows_x, 1);
    const sycl::range<3> block_dims(1, 1, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                mul_mat_p021_f16_f32(vx, y, dst, ncols_x, nrows_x, nchannels_x,
                                     nchannels_y, item_ct1);
            });
    }
}

static void ggml_mul_mat_vec_nc_f16_f32_sycl(
    const void *vx, const float *y, float *dst, const int ncols_x,
    const int nrows_x, const int row_stride_x, const int nchannels_x,
    const int nchannels_y, const int channel_stride_x, queue_ptr stream) {

    const sycl::range<3> block_nums(nchannels_y, nrows_x, 1);
    const sycl::range<3> block_dims(1, 1, WARP_SIZE);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                mul_mat_vec_nc_f16_f32(vx, y, dst, ncols_x, nrows_x,
                                       row_stride_x, channel_stride_x,
                                       nchannels_y / nchannels_x, item_ct1);
            });
    }
}

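// The ggml_cpy_*_sycl launchers copy (and possibly convert) ne elements
// between two possibly non-contiguous tensors; each work-item handles one
// element through the cpy_1_* functor passed to cpy_f32_f16. The quantized
// destinations (q8_0, q4_0, q4_1) instead launch one work-item per quant
// block, which is why they assert that ne is a multiple of the block size
// (QK8_0, QK4_0, QK4_1).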
static void
ggml_cpy_f16_f32_sycl(const char *cx, char *cdst, const int ne, const int ne00,
                      const int ne01, const int ne02, const int nb00,
                      const int nb01, const int nb02, const int nb03,
                      const int ne10, const int ne11, const int ne12,
                      const int nb10, const int nb11, const int nb12,
                      const int nb13, queue_ptr stream) {

    const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                                  sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                cpy_f32_f16<cpy_1_f16_f32>(cx, cdst, ne, ne00, ne01, ne02, nb00,
                                           nb01, nb02, nb03, ne10, ne11, ne12,
                                           nb10, nb11, nb12, nb13, item_ct1);
            });
    }
}

static void ggml_cpy_f32_f32_sycl(const char *cx, char *cdst, const int ne,
                                  const int ne00, const int ne01,
                                  const int ne02, const int nb00,
                                  const int nb01, const int nb02,
                                  const int nb03, const int ne10,
                                  const int ne11, const int ne12,
                                  const int nb10, const int nb11,
                                  const int nb12, const int nb13,
                                  queue_ptr stream) {

    const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                                  sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                cpy_f32_f16<cpy_1_f32_f32>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                           nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                           item_ct1);
            });
    }
}

static void ggml_cpy_f32_f16_sycl(const char *cx, char *cdst, const int ne,
                                  const int ne00, const int ne01,
                                  const int ne02, const int nb00,
                                  const int nb01, const int nb02,
                                  const int nb03, const int ne10,
                                  const int ne11, const int ne12,
                                  const int nb10, const int nb11,
                                  const int nb12, const int nb13,
                                  queue_ptr stream) {

    const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                                  sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                cpy_f32_f16<cpy_1_f32_f16>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                           nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                           item_ct1);
            });
    }
}

static void ggml_cpy_f32_q8_0_sycl(const char *cx, char *cdst, const int ne,
                                   const int ne00, const int ne01,
                                   const int ne02, const int nb00,
                                   const int nb01, const int nb02,
                                   const int nb03, const int ne10,
                                   const int ne11, const int ne12,
                                   const int nb10, const int nb11,
                                   const int nb12, const int nb13,
                                   queue_ptr stream) {

    GGML_ASSERT(ne % QK8_0 == 0);
    const int num_blocks = ne / QK8_0;
    stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks),
                                           sycl::range<3>(1, 1, 1)),
                         [=](sycl::nd_item<3> item_ct1) {
                             cpy_f32_q<cpy_blck_f32_q8_0, QK8_0>(
                                 cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                 nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                 item_ct1);
                         });
}

static void ggml_cpy_f32_q4_0_sycl(const char *cx, char *cdst, const int ne,
                                   const int ne00, const int ne01,
                                   const int ne02, const int nb00,
                                   const int nb01, const int nb02,
                                   const int nb03, const int ne10,
                                   const int ne11, const int ne12,
                                   const int nb10, const int nb11,
                                   const int nb12, const int nb13,
                                   queue_ptr stream) {

    GGML_ASSERT(ne % QK4_0 == 0);
    const int num_blocks = ne / QK4_0;
    stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks),
                                           sycl::range<3>(1, 1, 1)),
                         [=](sycl::nd_item<3> item_ct1) {
                             cpy_f32_q<cpy_blck_f32_q4_0, QK4_0>(
                                 cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                 nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                 item_ct1);
                         });
}

static void ggml_cpy_f32_q4_1_sycl(const char *cx, char *cdst, const int ne,
                                   const int ne00, const int ne01,
                                   const int ne02, const int nb00,
                                   const int nb01, const int nb02,
                                   const int nb03, const int ne10,
                                   const int ne11, const int ne12,
                                   const int nb10, const int nb11,
                                   const int nb12, const int nb13,
                                   queue_ptr stream) {

    GGML_ASSERT(ne % QK4_1 == 0);
    const int num_blocks = ne / QK4_1;
    stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks),
                                           sycl::range<3>(1, 1, 1)),
                         [=](sycl::nd_item<3> item_ct1) {
                             cpy_f32_q<cpy_blck_f32_q4_1, QK4_1>(
                                 cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                 nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                 item_ct1);
                         });
}

static void ggml_cpy_f16_f16_sycl(const char *cx, char *cdst, const int ne,
                                  const int ne00, const int ne01,
                                  const int ne02, const int nb00,
                                  const int nb01, const int nb02,
                                  const int nb03, const int ne10,
                                  const int ne11, const int ne12,
                                  const int nb10, const int nb11,
                                  const int nb12, const int nb13,
                                  queue_ptr stream) {

    const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                                  sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                cpy_f32_f16<cpy_1_f16_f16>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                           nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                           item_ct1);
            });
    }
}

static void ggml_cpy_i16_i16_sycl(const char *cx, char *cdst, const int ne,
                                  const int ne00, const int ne01,
                                  const int ne02, const int nb00,
                                  const int nb01, const int nb02,
                                  const int nb03, const int ne10,
                                  const int ne11, const int ne12,
                                  const int nb10, const int nb11,
                                  const int nb12, const int nb13,
                                  queue_ptr stream) {

    const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
    {
        // dpct::has_capability_or_fail(stream->get_device(),
        //                              {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                                  sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                cpy_f32_f16<cpy_1_i16_i16>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                           nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                           item_ct1);
            });
    }
}

static void ggml_cpy_i32_i32_sycl(const char *cx, char *cdst, const int ne,
                                  const int ne00, const int ne01,
                                  const int ne02, const int nb00,
                                  const int nb01, const int nb02,
                                  const int nb03, const int ne10,
                                  const int ne11, const int ne12,
                                  const int nb10, const int nb11,
                                  const int nb12, const int nb13,
                                  queue_ptr stream) {

    const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
    {
        // dpct::has_capability_or_fail(stream->get_device(),
        //                              {sycl::aspect::fp16});
        stream->parallel_for(
            sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                                  sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                cpy_f32_f16<cpy_1_i32_i32>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
                                           nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
                                           item_ct1);
            });
    }
}

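// scale_f32_sycl and clamp_f32_sycl are plain elementwise launchers: the k
// elements are split into work-groups of SYCL_SCALE_BLOCK_SIZE /
// SYCL_CLAMP_BLOCK_SIZE work-items, one element per work-item, with the
// usual ceiling division for the trailing partial block.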
static void scale_f32_sycl(const float *x, float *dst, const float scale,
                           const int k, queue_ptr stream) {
    const int num_blocks = (k + SYCL_SCALE_BLOCK_SIZE - 1) / SYCL_SCALE_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            scale_f32(x, dst, scale, k, item_ct1);
        });
}

static void clamp_f32_sycl(const float *x, float *dst, const float min,
                           const float max, const int k,
                           queue_ptr stream) {
    const int num_blocks = (k + SYCL_CLAMP_BLOCK_SIZE - 1) / SYCL_CLAMP_BLOCK_SIZE;
    stream->parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
                              sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            clamp_f32(x, dst, min, max, k, item_ct1);
        });
}

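// Each RoPE work-item rotates one pair of columns, so the grid only needs to
// cover ncols/2 positions per row; a null pos pointer selects the
// compile-time variant without position data. For example (using an
// illustrative SYCL_ROPE_BLOCK_SIZE of 256): ncols = 128 gives
// num_blocks_x = (128 + 511) / 512 = 1 work-group per row in the column
// dimension.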
template <typename T>
static void rope_sycl(const T *x, T *dst, int ncols, int nrows,
                      const int32_t *pos, float freq_scale, int p_delta_rows,
                      float freq_base, float ext_factor, float attn_factor,
                      rope_corr_dims corr_dims, queue_ptr stream) {
    GGML_ASSERT(ncols % 2 == 0);
    const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1);
    const int num_blocks_x = (ncols + 2*SYCL_ROPE_BLOCK_SIZE - 1) / (2*SYCL_ROPE_BLOCK_SIZE);
    const sycl::range<3> block_nums(1, num_blocks_x, nrows);

    if (pos == nullptr) {
        /*
        DPCT1049:40: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) {
                rope<T, false>(x, dst, ncols, pos, freq_scale, p_delta_rows,
                               freq_base, ext_factor, attn_factor, corr_dims,
                               item_ct1);
            });
    } else {
        /*
        DPCT1049:41: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) {
                rope<T, true>(x, dst, ncols, pos, freq_scale, p_delta_rows,
                              freq_base, ext_factor, attn_factor, corr_dims,
                              item_ct1);
            });
    }
}

template <typename T>
static void rope_neox_sycl(const T *x, T *dst, int ncols, int n_dims, int nrows,
                           const int32_t *pos, float freq_scale,
                           int p_delta_rows, float freq_base, float ext_factor,
                           float attn_factor, rope_corr_dims corr_dims,
                           const float * freq_factors, queue_ptr stream) {
    GGML_ASSERT(ncols % 2 == 0);
    const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1);
    const int num_blocks_x = (ncols + 2*SYCL_ROPE_BLOCK_SIZE - 1) / (2*SYCL_ROPE_BLOCK_SIZE);
    const sycl::range<3> block_nums(1, num_blocks_x, nrows);

    const float theta_scale = powf(freq_base, -2.0f/n_dims);
    const float inv_ndims = -1.0f / n_dims;

    if (pos == nullptr) {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        if (freq_factors == nullptr) {
            stream->parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1) {
                    rope_neox<T, false, false>(x, dst, ncols, n_dims, pos, freq_scale,
                                               p_delta_rows, ext_factor, attn_factor,
                                               corr_dims, theta_scale, inv_ndims, freq_factors,
                                               item_ct1);
                });
        } else {
            stream->parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1) {
                    rope_neox<T, false, true>(x, dst, ncols, n_dims, pos, freq_scale,
                                              p_delta_rows, ext_factor, attn_factor,
                                              corr_dims, theta_scale, inv_ndims, freq_factors,
                                              item_ct1);
                });
        }
    } else {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});
        if (freq_factors == nullptr) {
            stream->parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1) {
                    rope_neox<T, true, false>(x, dst, ncols, n_dims, pos, freq_scale,
                                              p_delta_rows, ext_factor, attn_factor,
                                              corr_dims, theta_scale, inv_ndims, freq_factors, item_ct1);
                });
        } else {
            stream->parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1) {
                    rope_neox<T, true, true>(x, dst, ncols, n_dims, pos, freq_scale,
                                             p_delta_rows, ext_factor, attn_factor,
                                             corr_dims, theta_scale, inv_ndims, freq_factors, item_ct1);
                });
        }
    }
}

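// sum_rows launches one WARP_SIZE-wide work-group per row and reduces the
// row inside the sub-group, hence the required sub-group size of 32.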
static void sum_rows_f32_sycl(const float *x, float *dst, const int ncols,
                              const int nrows, queue_ptr stream) {
    const sycl::range<3> block_dims(1, 1, WARP_SIZE);
    const sycl::range<3> block_nums(1, nrows, 1);
    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1)
                             [[intel::reqd_sub_group_size(32)]] {
                                 k_sum_rows_f32(x, dst, ncols, item_ct1);
                             });
}

static int next_power_of_2(int x) {
    int n = 1;
    while (n < x) {
        n *= 2;
    }
    return n;
}

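// next_power_of_2(5) == 8 and next_power_of_2(8) == 8. argsort pads each row
// to the next power of two because the bitonic sorting network only works on
// power-of-two sequence lengths; the padded index buffer lives in local
// memory (shared_mem bytes per work-group).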
static void argsort_f32_i32_sycl(const float *x, int *dst, const int ncols,
                                 const int nrows, ggml_sort_order order,
                                 queue_ptr stream) {
    // bitonic sort requires ncols to be power of 2
    const int ncols_pad = next_power_of_2(ncols);

    const sycl::range<3> block_dims(1, 1, ncols_pad);
    const sycl::range<3> block_nums(1, nrows, 1);
    const size_t shared_mem = ncols_pad * sizeof(int);

    if (order == GGML_SORT_ORDER_ASC) {
        stream->submit([&](sycl::handler &cgh) {
            sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
                sycl::range<1>(shared_mem), cgh);

            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1) {
                    k_argsort_f32_i32<GGML_SORT_ORDER_ASC>(
                        x, dst, ncols, ncols_pad, item_ct1,
                        dpct_local_acc_ct1.get_multi_ptr<sycl::access::decorated::no>()
                            .get());
                });
        });
    } else if (order == GGML_SORT_ORDER_DESC) {
        stream->submit([&](sycl::handler &cgh) {
            sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
                sycl::range<1>(shared_mem), cgh);

            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item_ct1) {
                    k_argsort_f32_i32<GGML_SORT_ORDER_DESC>(
                        x, dst, ncols, ncols_pad, item_ct1,
                        dpct_local_acc_ct1.get_multi_ptr<sycl::access::decorated::no>()
                            .get());
                });
        });
    } else {
        GGML_ASSERT(false);
    }
}

static void diag_mask_inf_f32_sycl(const float *x, float *dst,
                                   const int ncols_x, const int nrows_x,
                                   const int rows_per_channel, const int n_past,
                                   queue_ptr stream) {
    const sycl::range<3> block_dims(1, SYCL_DIAG_MASK_INF_BLOCK_SIZE, 1);
    const int block_num_x = (ncols_x + SYCL_DIAG_MASK_INF_BLOCK_SIZE - 1) / SYCL_DIAG_MASK_INF_BLOCK_SIZE;
    const sycl::range<3> block_nums(1, block_num_x, nrows_x);
    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1) {
                             diag_mask_inf_f32(x, dst, ncols_x,
                                               rows_per_channel, n_past,
                                               item_ct1);
                         });
}

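// soft_max_f32_submitter instantiates the kernel with compile-time column and
// block sizes where possible (<true, 32, 32> .. <true, 4096, 1024>);
// <.., 0, 0> is the generic fallback, and the leading bool selects whether
// row values are staged in local memory. soft_max_f32_sycl below picks the
// variant based on ncols_x and the device's local_mem_size.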
template <bool vals_smem, int ncols_template, int block_size_template>
static void soft_max_f32_submitter(const float * x, const float * mask, float * dst, const int ncols_par,
                                   const int nrows_y, const float scale, const float max_bias, const float m0,
                                   const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims,
                                   const size_t n_local_scratch, queue_ptr stream) {
    stream->submit([&](sycl::handler &cgh) {
        sycl::local_accessor<float, 1> local_buf_acc(n_local_scratch, cgh);

        cgh.parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
                soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, dst, ncols_par,
                                                                             nrows_y, scale, max_bias, m0,
                                                                             m1, n_head_log2, item_ct1,
                                                                             local_buf_acc.get_pointer());
            });
    });
}

static void soft_max_f32_sycl(const float * x, const float * mask,
                              float * dst, const int ncols_x, const int nrows_x,
                              const int nrows_y, const float scale, const float max_bias,
                              queue_ptr stream) {
    int nth = WARP_SIZE;
    int max_block_size = GROUP_SIZE;
    while (nth < ncols_x && nth < max_block_size) nth *= 2;
    if (nth > max_block_size) nth = max_block_size;

    const sycl::range<3> block_dims(1, 1, nth);
    const sycl::range<3> block_nums(1, 1, nrows_x);
    const size_t n_local_scratch = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE);

    const uint32_t n_head_kv   = nrows_x/nrows_y;
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    const size_t local_mem_size = stream->get_device().get_info<sycl::info::device::local_mem_size>();
    if (n_local_scratch*sizeof(float) < local_mem_size) {
        if (ncols_x > max_block_size) {
            soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
                                               max_bias, m0, m1, n_head_log2, block_nums,
                                               block_dims, n_local_scratch, stream);
            return;
        }
        switch (ncols_x) {
            case 32:
                soft_max_f32_submitter<true, 32, 32>(x, mask, dst, ncols_x, nrows_y, scale,
                                                     max_bias, m0, m1, n_head_log2, block_nums,
                                                     block_dims, n_local_scratch, stream);
                break;
            case 64:
                soft_max_f32_submitter<true, 64, 64>(x, mask, dst, ncols_x, nrows_y, scale,
                                                     max_bias, m0, m1, n_head_log2, block_nums,
                                                     block_dims, n_local_scratch, stream);
                break;
            case 128:
                soft_max_f32_submitter<true, 128, 128>(x, mask, dst, ncols_x, nrows_y, scale,
                                                       max_bias, m0, m1, n_head_log2, block_nums,
                                                       block_dims, n_local_scratch, stream);
                break;
            case 256:
                soft_max_f32_submitter<true, 256, 256>(x, mask, dst, ncols_x, nrows_y, scale,
                                                       max_bias, m0, m1, n_head_log2, block_nums,
                                                       block_dims, n_local_scratch, stream);
                break;
            case 512:
                soft_max_f32_submitter<true, 512, 512>(x, mask, dst, ncols_x, nrows_y, scale,
                                                       max_bias, m0, m1, n_head_log2, block_nums,
                                                       block_dims, n_local_scratch, stream);
                break;
            case 1024:
                soft_max_f32_submitter<true, 1024, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
                                                         max_bias, m0, m1, n_head_log2, block_nums,
                                                         block_dims, n_local_scratch, stream);
                break;
            case 2048:
                soft_max_f32_submitter<true, 2048, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
                                                         max_bias, m0, m1, n_head_log2, block_nums,
                                                         block_dims, n_local_scratch, stream);
                break;
            case 4096:
                soft_max_f32_submitter<true, 4096, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
                                                         max_bias, m0, m1, n_head_log2, block_nums,
                                                         block_dims, n_local_scratch, stream);
                break;
            default:
                soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
                                                   max_bias, m0, m1, n_head_log2, block_nums,
                                                   block_dims, n_local_scratch, stream);
                break;
        }
    } else {
        soft_max_f32_submitter<false, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
                                            max_bias, m0, m1, n_head_log2, block_nums,
                                            block_dims, WARP_SIZE, stream);
    }
}

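// im2col parallelizes over (IC, OH, OW * KW * KH): each work-item writes one
// element of the unrolled convolution matrix. For example (illustrative
// sizes), OW = 64 with KW = KH = 3 gives parallel_elements = 576 and, with a
// block size of 256, (576 + 255) / 256 = 3 work-groups per (channel, output
// row) pair.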
template <typename T>
static void im2col_sycl(const float *x, T *dst, int IW, int IH,
                        int OW, int OH, int KW, int KH, int IC,
                        int offset_delta, int s0, int s1, int p0,
                        int p1, int d0, int d1,
                        queue_ptr stream) {
    const int parallel_elements = OW * KW * KH;
    const int num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE;
    sycl::range<3> block_nums(IC, OH, num_blocks);
    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums *
                                  sycl::range<3>(1, 1, SYCL_IM2COL_BLOCK_SIZE),
                              sycl::range<3>(1, 1, SYCL_IM2COL_BLOCK_SIZE)),
            [=](sycl::nd_item<3> item_ct1) {
                im2col_kernel(x, dst, offset_delta, IW, IH, OW, KW, KH,
                              parallel_elements, (IC * KH * KW), s0, s1, p0,
                              p1, d0, d1, item_ct1);
            });
    }
}

static bool g_sycl_loaded = false;

bool ggml_sycl_loaded(void) {
    return g_sycl_loaded;
}

void print_device_detail(int id, sycl::device &device, std::string device_type) {

    dpct::device_info prop;
    SYCL_CHECK(CHECK_TRY_ERROR(
        dpct::get_device_info(prop, device)));

    std::string version;
    version += std::to_string(prop.get_major_version());
    version += ".";
    version += std::to_string(prop.get_minor_version());

    device_type = std::regex_replace(device_type, std::regex("ext_oneapi_"), "");
    std::string name = std::string(prop.get_name());
    name = std::regex_replace(name, std::regex("\\(R\\)"), "");
    name = std::regex_replace(name, std::regex("\\(TM\\)"), "");

    auto global_mem_size = prop.get_global_mem_size()/1000000;

    fprintf(stderr, "|%2d|%19s|%39s|%7s|%7d|%8d|%5d|%6luM|%21s|\n", id, device_type.c_str(),
            name.c_str(), version.c_str(), prop.get_max_compute_units(),
            prop.get_max_work_group_size(), prop.get_max_sub_group_size(),
            global_mem_size, device.get_info<sycl::info::device::driver_version>().c_str());
}

void ggml_backend_sycl_print_sycl_devices() {
    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_print_sycl_devices\n");
    int device_count = dpct::dev_mgr::instance().device_count();
    std::map<std::string, size_t> DeviceNums;
    fprintf(stderr, "found %d SYCL devices:\n", device_count);
    fprintf(stderr, "|  |                   |                                       |       |Max    |        |Max  |Global |                     |\n");
    fprintf(stderr, "|  |                   |                                       |       |compute|Max work|sub  |mem    |                     |\n");
    fprintf(stderr, "|ID|        Device Type|                                   Name|Version|units  |group   |group|size   |       Driver version|\n");
    fprintf(stderr, "|--|-------------------|---------------------------------------|-------|-------|--------|-----|-------|---------------------|\n");
    for (int id = 0; id < device_count; ++id) {
        sycl::device device = dpct::dev_mgr::instance().get_device(id);
        sycl::backend backend = device.get_backend();
        std::string backend_type = get_device_backend_and_type(device);
        int type_id = DeviceNums[backend_type]++;
        std::stringstream device_type;
        device_type << "[" << backend_type << ":" << std::to_string(type_id) << "]";
        print_device_detail(id, device, device_type.str());
    }
}
int get_sycl_env(const char *env_name, int default_val) {
    char *user_device_string = getenv(env_name);
    int user_number = default_val;

    unsigned n;
    if (user_device_string != NULL &&
        sscanf(user_device_string, " %u", &n) == 1) {
        user_number = (int)n;
    } else {
        user_number = default_val;
    }
    return user_number;
}
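
// Typical use: GGML_SYCL_DEBUG=1 in the environment enables debug logging.
// Any value that does not parse as a non-negative integer falls back to
// default_val.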
int get_work_group_size(int user_device_id) {
    dpct::device_info prop;
    dpct::get_device_info(prop,
                          dpct::dev_mgr::instance().get_device(user_device_id));
    return prop.get_max_work_group_size();
}
static void ggml_check_sycl() try {
    static bool initialized = false;

    if (!initialized) {
        fprintf(stderr, "[SYCL] call ggml_check_sycl\n");
        g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0);

        fprintf(stderr, "%s: GGML_SYCL_DEBUG: %d\n", __func__, g_ggml_sycl_debug);

#if defined(GGML_SYCL_F16)
        fprintf(stderr, "%s: GGML_SYCL_F16: yes\n", __func__);
#else
        fprintf(stderr, "%s: GGML_SYCL_F16: no\n", __func__);
#endif

/* DO NOT REMOVE; kept for a future XMX optimization.
#if defined(SYCL_USE_XMX)
        fprintf(stderr, "%s: SYCL_USE_XMX: yes\n", __func__);
#else
        fprintf(stderr, "%s: SYCL_USE_XMX: no\n", __func__);
#endif
*/

        if (CHECK_TRY_ERROR(g_all_sycl_device_count =
                                dpct::dev_mgr::instance().device_count()) != 0) {
            initialized = true;
            g_sycl_loaded = false;
            return;
        }
        GGML_ASSERT(g_all_sycl_device_count <= GGML_SYCL_MAX_DEVICES);
        ggml_backend_sycl_print_sycl_devices();
        initialized = true;
        g_sycl_loaded = true;
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static ggml_sycl_device_info ggml_sycl_init() {
    ggml_sycl_device_info info = {};

    info.device_count = dpct::dev_mgr::instance().device_count();
    if (info.device_count == 0) {
        // note: the original format string had a dangling %s with no matching
        // argument; report the reason directly instead
        fprintf(stderr, "%s: failed to initialize " GGML_SYCL_NAME ": no devices found\n", __func__);
        return info;
    }

    GGML_ASSERT(info.device_count <= GGML_SYCL_MAX_DEVICES);

    int64_t total_vram = 0;
#if defined(GGML_SYCL_FORCE_MMQ)
    fprintf(stderr, "%s: GGML_SYCL_FORCE_MMQ: yes\n", __func__);
#else
    fprintf(stderr, "%s: GGML_SYCL_FORCE_MMQ: no\n", __func__);
#endif
#if defined(SYCL_USE_XMX)
    fprintf(stderr, "%s: SYCL_USE_XMX: yes\n", __func__);
#else
    fprintf(stderr, "%s: SYCL_USE_XMX: no\n", __func__);
#endif
    fprintf(stderr, "%s: found %d " GGML_SYCL_NAME " devices:\n", __func__, info.device_count);

    for (int i = 0; i < info.device_count; ++i) {
        info.devices[i].vmm = 0;
        dpct::device_info prop;
        SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
            prop, dpct::dev_mgr::instance().get_device(i))));

        info.default_tensor_split[i] = total_vram;
        total_vram += prop.get_global_mem_size();

        info.devices[i].cc =
            100 * prop.get_major_version() + 10 * prop.get_minor_version();
    }

    for (int id = 0; id < info.device_count; ++id) {
        info.default_tensor_split[id] /= total_vram;
    }
    return info;
}
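
// The device table below is built once on first use (function-local static),
// so device enumeration and the default tensor split are computed a single
// time and shared across the backend.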
const ggml_sycl_device_info & ggml_sycl_info() {
    static ggml_sycl_device_info info = ggml_sycl_init();
    return info;
}
/*
device_index: device index from 0 to n (contiguous numbers).
It is used for device selection in the SYCL backend's internal data structures.
*/
inline void check_allow_gpu_index(const int device_index) {
    if (device_index >= ggml_sycl_info().device_count) {
        char error_buf[256];
        snprintf(
            error_buf,
            sizeof(error_buf),
            "%s error: device_index:%d is out of range: [0-%d]",
            __func__,
            device_index,
            ggml_sycl_info().device_count - 1);
        fprintf(stderr, "%s\n", error_buf);
        assert(false);
    }
}
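
// Legacy device-memory pool: freed buffers are parked in a fixed-size table
// and handed back best-fit on later allocations; fresh allocations reserve 5%
// extra (look-ahead) to make future reuse more likely.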
// buffer pool for sycl (legacy)
struct ggml_sycl_pool_leg : public ggml_sycl_pool {
    static const int MAX_SYCL_BUFFERS = 256;

    int device;
    queue_ptr qptr;
    struct ggml_sycl_buffer {
        void * ptr = nullptr;
        size_t size = 0;
    };

    ggml_sycl_buffer buffer_pool[MAX_SYCL_BUFFERS] = {};
    size_t pool_size = 0;

    explicit ggml_sycl_pool_leg(queue_ptr qptr_, int device_) :
        qptr(qptr_),
        device(device_) {
    }

    ~ggml_sycl_pool_leg() {
        for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
            ggml_sycl_buffer & b = buffer_pool[i];
            if (b.ptr != nullptr) {
                SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(b.ptr, *qptr)));
                pool_size -= b.size;
            }
        }
        GGML_ASSERT(pool_size == 0);
    }

    void * alloc(size_t size, size_t * actual_size) override {
#ifdef DEBUG_SYCL_MALLOC
        int nnz = 0;
        size_t max_size = 0;
#endif
        size_t best_diff = 1ull << 36;
        int ibest = -1;
        for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
            ggml_sycl_buffer& b = buffer_pool[i];
            if (b.ptr != nullptr) {
#ifdef DEBUG_SYCL_MALLOC
                ++nnz;
                if (b.size > max_size) max_size = b.size;
#endif
                if (b.size >= size) {
                    size_t diff = b.size - size;
                    if (diff < best_diff) {
                        best_diff = diff;
                        ibest = i;
                        if (!best_diff) {
                            void * ptr = b.ptr;
                            *actual_size = b.size;
                            b.ptr = nullptr;
                            b.size = 0;
                            return ptr;
                        }
                    }
                }
            }
        }
        if (ibest >= 0) {
            ggml_sycl_buffer& b = buffer_pool[ibest];
            void * ptr = b.ptr;
            *actual_size = b.size;
            b.ptr = nullptr;
            b.size = 0;
            return ptr;
        }
        void * ptr;
        size_t look_ahead_size = (size_t) (1.05 * size);

        SYCL_CHECK(
            CHECK_TRY_ERROR(ptr = (void *)sycl::malloc_device(
                                look_ahead_size, *qptr)));
        *actual_size = look_ahead_size;
        pool_size += look_ahead_size;
#ifdef DEBUG_SYCL_MALLOC
        // note: the macro name and the variables referenced here were fixed so
        // that this block actually compiles when the define is enabled
        fprintf(stderr, "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, device, nnz,
                (uint32_t)(max_size/1024/1024), (uint32_t)(pool_size/1024/1024), (uint32_t)(size/1024/1024));
#endif
        // GGML_SYCL_DEBUG("ggml_sycl_pool_malloc_leg look_ahead_size=%lu, return %p\n", look_ahead_size, ptr);
        return ptr;
    }

    void free(void * ptr, size_t size) override {
        for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
            ggml_sycl_buffer& b = buffer_pool[i];
            if (b.ptr == nullptr) {
                b.ptr = ptr;
                b.size = size;
                return;
            }
        }
        fprintf(stderr, "WARNING: sycl buffer pool full, increase MAX_SYCL_BUFFERS\n");
        SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(ptr, *qptr)));
        pool_size -= size;
    }
};
std::unique_ptr<ggml_sycl_pool> ggml_backend_sycl_context::new_pool_for_device(queue_ptr qptr, int device) {
    // TBD: NO VMM support
    // if (ggml_sycl_info().devices[device].vmm) {
    //     return std::unique_ptr<ggml_sycl_pool>(new ggml_sycl_pool_vmm(device));
    // }
    return std::unique_ptr<ggml_sycl_pool>(new ggml_sycl_pool_leg(qptr, device));
}

// TBD pool with virtual memory management
// struct ggml_sycl_pool_vmm : public ggml_sycl_pool
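
// Copies rows [i1_low, i1_high) of plane (i3, i2) of src into contiguous
// device memory. Three paths: a single async memcpy when rows are fully
// contiguous, a pitched 2D copy when only elements within a row are
// contiguous, and a per-row fallback otherwise.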
static dpct::err0 ggml_sycl_cpy_tensor_2d(void *dst,
                                          const struct ggml_tensor *src,
                                          int64_t i3, int64_t i2,
                                          int64_t i1_low, int64_t i1_high,
                                          queue_ptr stream) try {

    dpct::memcpy_direction kind;
    char * src_ptr;
    if (src->backend == GGML_BACKEND_TYPE_CPU) {
        kind = dpct::host_to_device;
        src_ptr = (char *) src->data;
        // GGML_SYCL_DEBUG("ggml_sycl_cpy_tensor_2d GGML_BACKEND_TYPE_CPU src_ptr %p\n", src_ptr);
    } else if (src->backend == GGML_BACKEND_TYPE_GPU || src->backend == GGML_BACKEND_TYPE_GPU_SPLIT) {
        GGML_ASSERT(src->backend != GGML_BACKEND_TYPE_GPU_SPLIT || (i1_low == 0 && i1_high == src->ne[1]));
        kind = dpct::device_to_device;
        ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
        int id;
        SYCL_CHECK(CHECK_TRY_ERROR(
            id = get_current_device_id()));
        // GGML_SYCL_DEBUG("current device index %d\n", id);
        src_ptr = (char *) extra->data_device[id];
    } else {
        // GGML_SYCL_DEBUG("GGML_ASSERT(false)\n");
        GGML_ASSERT(false);
    }
    char * dst_ptr = (char *) dst;

    GGML_TENSOR_LOCALS_1(int64_t, ne, src, ne);
    GGML_TENSOR_LOCALS(int64_t, nb, src, nb);
    const enum ggml_type type = src->type;
    const int64_t ts = ggml_type_size(type);
    const int64_t bs = ggml_blck_size(type);
    int64_t i1_diff = i1_high - i1_low;

    const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3;
    if (nb0 == ts && nb1 == ts*ne0/bs) {
        // GGML_SYCL_DEBUG("stream->memcpy: dst_ptr=%p, x=%p, size=%lu\n", dst_ptr, x, i1_diff * nb1);
        // return CHECK_TRY_ERROR(stream->memcpy(dst_ptr, x, i1_diff * nb1));
        return CHECK_TRY_ERROR(dpct::async_dpct_memcpy(dst_ptr, x, i1_diff * nb1,
                                                       kind, *stream));
    } else if (nb0 == ts) {
        return CHECK_TRY_ERROR(
            dpct::async_dpct_memcpy(dst_ptr, ts * ne0 / bs, x, nb1,
                                    ts * ne0 / bs, i1_diff, kind, *stream));
    } else {
        for (int64_t i1 = 0; i1 < i1_diff; i1++) {
            const void * rx = (const void *) ((const char *) x + i1*nb1);
            void * rd = (void *) (dst_ptr + i1*ts*ne0/bs);
            // pretend the row is a matrix with cols=1
            dpct::err0 r = CHECK_TRY_ERROR(dpct::async_dpct_memcpy(
                rd, ts / bs, rx, nb0, ts / bs, ne0, kind, *stream));
            /*
            DPCT1001:85: The statement could not be removed.
            */
            /*
            DPCT1000:86: Error handling if-stmt was detected but could not be
            rewritten.
            */
            if (r != 0) return r;
        }
        return 0;
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                  const ggml_tensor *src1, ggml_tensor *dst,
                                  const float *src0_d, const float *src1_d,
                                  float *dst_d, const queue_ptr &stream) {

    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
    GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type));
    GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type));

    const int32_t * src1_i32 = (const int32_t *) src1_d;

    switch (src0->type) {
        case GGML_TYPE_F16:
            get_rows_sycl_float(ctx, src0, src1, dst, (const sycl::half *)src0_d,
                                src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_F32:
            get_rows_sycl_float(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q4_0:
            get_rows_sycl<QK4_0, QR4_0, dequantize_q4_0>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q4_1:
            get_rows_sycl<QK4_1, QR4_1, dequantize_q4_1>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q5_0:
            get_rows_sycl<QK5_0, QR5_0, dequantize_q5_0>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q5_1:
            get_rows_sycl<QK5_1, QR5_1, dequantize_q5_1>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q8_0:
            get_rows_sycl<QK8_0, QR8_0, dequantize_q8_0>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        default:
            // TODO: k-quants
            fprintf(stderr, "%s: unsupported type: %s\n", __func__, ggml_type_name(src0->type));
            GGML_ASSERT(false);
            break;
    }
}
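
// Dispatches a broadcasting binary op over the type combinations this backend
// supports (f32/f32, f16/f16, f16 in / f32 out, i32/i32, i16/i16); any other
// combination aborts.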
template <class op>
inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                   const ggml_tensor *src1, ggml_tensor *dst,
                                   const float *src0_dd, const float *src1_dd,
                                   float *dst_dd,
                                   const queue_ptr &main_stream) {

    if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
        op()(ctx, src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream);
    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
        op()(ctx, src0, src1, dst, (const sycl::half *)src0_dd, src1_dd,
             (sycl::half *)dst_dd, main_stream);
    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
        op()(ctx, src0, src1, dst, (const sycl::half *)src0_dd, src1_dd, dst_dd,
             main_stream);
    } else if (src0->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
        op()(ctx, src0, src1, dst, (const int32_t *)src0_dd, (const int32_t *)src1_dd, (int32_t *)dst_dd,
             main_stream);
    } else if (src0->type == GGML_TYPE_I16 && dst->type == GGML_TYPE_I16) {
        op()(ctx, src0, src1, dst, (const int16_t *)src0_dd, (const int16_t *)src1_dd, (int16_t *)dst_dd,
             main_stream);
    } else {
        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
                ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type));
        GGML_ASSERT(false);
    }
}
static void ggml_sycl_op_repeat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const float *src0_d, const float *src1_d,
                                float *dst_d,
                                const queue_ptr &main_stream) {

    ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_repeat>>(ctx, dst, src0, dst, nullptr, src0_d, dst_d, main_stream);

    (void) src1;
    (void) src1_d;
}

inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                             ggml_tensor *dst, const float *src0_dd,
                             const float *src1_dd, float *dst_dd,
                             const queue_ptr &main_stream) {

    ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_add>>(ctx, src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream);
}
inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                             ggml_tensor *dst, const float *src0_dd,
                             const float *src1_dd, float *dst_dd,
                             const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported

    int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
    int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
    // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
    int offset = dst->op_params[3] / 4; // offset in bytes, converted to float elements

    acc_f32_sycl(src0_dd, src1_dd, dst_dd, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, main_stream);

    (void) dst;
}
inline void ggml_sycl_op_mul(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                             ggml_tensor *dst, const float *src0_dd,
                             const float *src1_dd, float *dst_dd,
                             const queue_ptr &main_stream) {

    ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_mul>>(ctx, src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream);
}

inline void ggml_sycl_op_div(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                             ggml_tensor *dst, const float *src0_dd,
                             const float *src1_dd, float *dst_dd,
                             const queue_ptr &main_stream) {

    ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_div>>(ctx, src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream);
}

inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                              ggml_tensor *dst, const float *src0_dd,
                              const float *src1_dd, float *dst_dd,
                              const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    gelu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                              ggml_tensor *dst, const float *src0_dd,
                              const float *src1_dd, float *dst_dd,
                              const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    silu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                    const ggml_tensor *src1, ggml_tensor *dst,
                                    const float *src0_dd, const float *src1_dd,
                                    float *dst_dd,
                                    const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    gelu_quick_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                              ggml_tensor *dst, const float *src0_dd,
                              const float *src1_dd, float *dst_dd,
                              const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    tanh_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                              ggml_tensor *dst, const float *src0_dd,
                              const float *src1_dd, float *dst_dd,
                              const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    relu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

static void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                     const ggml_tensor *src1, ggml_tensor *dst,
                                     const float *src0_dd, const float *src1_dd,
                                     float *dst_dd,
                                     const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    hardsigmoid_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

static void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                   const ggml_tensor *src1, ggml_tensor *dst,
                                   const float *src0_dd, const float *src1_dd,
                                   float *dst_dd, const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    hardswish_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                    const ggml_tensor *src1, ggml_tensor *dst,
                                    const float *src0_dd, const float *src1_dd,
                                    float *dst_dd,
                                    const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    float negative_slope;
    memcpy(&negative_slope, dst->op_params, sizeof(float));

    leaky_relu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), negative_slope, main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                             ggml_tensor *dst, const float *src0_dd,
                             const float *src1_dd, float *dst_dd,
                             const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    sqr_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
inline void ggml_sycl_op_norm(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                              ggml_tensor *dst, const float *src0_dd,
                              const float *src1_dd, float *dst_dd,
                              const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    const int64_t ne00 = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_group_norm(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                    const ggml_tensor *src1, ggml_tensor *dst,
                                    const float *src0_dd, const float *src1_dd,
                                    float *dst_dd,
                                    const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int num_groups = dst->op_params[0];
    int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
    group_norm_f32_sycl(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
inline void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const float *src0_dd, const float *src1_dd,
                                float *dst_dd,
                                const queue_ptr &main_stream) {
#pragma message("TODO: generalize concat kernel for dim != 2")
#pragma message("    https://github.com/ggerganov/llama.cpp/pull/7563")
    int dim = dst->op_params[0];
    GGML_ASSERT(dim == 2);

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    for (int i3 = 0; i3 < dst->ne[3]; i3++) {
        concat_f32_sycl(src0_dd + i3 * (src0->nb[3] / 4), src1_dd + i3 * (src1->nb[3] / 4), dst_dd + i3 * (dst->nb[3] / 4), dst->ne[0], dst->ne[1], dst->ne[2], src0->ne[2], main_stream);
    }

    (void) src1;
    (void) dst;
}

inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                 const ggml_tensor *src1, ggml_tensor *dst,
                                 const float *src0_dd, const float *src1_dd,
                                 float *dst_dd,
                                 const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    const float sf0 = (float)dst->ne[0]/src0->ne[0];
    const float sf1 = (float)dst->ne[1]/src0->ne[1];
    const float sf2 = (float)dst->ne[2]/src0->ne[2];
    const float sf3 = (float)dst->ne[3]/src0->ne[3];

    upscale_f32_sycl(src0_dd, dst_dd, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3],
                     dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
                     main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                             ggml_tensor *dst, const float *src0_dd,
                             const float *src1_dd, float *dst_dd,
                             const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);
    GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors

    pad_f32_sycl(src0_dd, dst_dd,
                 src0->ne[0], src0->ne[1], src0->ne[2],
                 dst->ne[0], dst->ne[1], dst->ne[2], main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
inline void ggml_sycl_op_rms_norm(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                  const ggml_tensor *src1, ggml_tensor *dst,
                                  const float *src0_dd, const float *src1_dd,
                                  float *dst_dd,
                                  const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    const int64_t ne00 = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    rms_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
inline void ggml_sycl_op_mul_mat_q(
    ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
    const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
    float *dst_dd_i, const int64_t row_low, const int64_t row_high,
    const int64_t src1_ncols, const int64_t src1_padded_row_size,
    const queue_ptr &stream) try {

    const int64_t ne00 = src0->ne[0];

    const int64_t ne10 = src1->ne[0];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff = row_high - row_low;

    int device_id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(device_id = get_current_device_id()));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the dequantize_mul_mat kernel writes into
    const int64_t nrows_dst = device_id == ctx.device ? ne0 : row_diff;

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            ggml_mul_mat_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q4_1:
            ggml_mul_mat_q4_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q5_0:
            ggml_mul_mat_q5_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q5_1:
            ggml_mul_mat_q5_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q8_0:
            ggml_mul_mat_q8_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q2_K:
            ggml_mul_mat_q2_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q3_K:
            ggml_mul_mat_q3_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q4_K:
            ggml_mul_mat_q4_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q5_K:
            ggml_mul_mat_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        case GGML_TYPE_Q6_K:
            ggml_mul_mat_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    (void) src1;
    (void) dst;
    (void) src1_ddf_i;
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
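
// When a tensor is split across devices, each device's share of rows is
// rounded to a multiple of the value returned here so that quantization block
// boundaries are respected; the rounding depends on the quantization type and
// on the best compute capability among the participating devices.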
static int64_t get_row_rounding(ggml_type type, const std::array<float, GGML_SYCL_MAX_DEVICES> & tensor_split) {
    int64_t min_compute_capability = INT_MAX;
    int64_t max_compute_capability = INT_MIN;
    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        if (tensor_split[i] < (i + 1 < ggml_sycl_info().device_count ? tensor_split[i + 1] : 1.0f)) {
            if (min_compute_capability > ggml_sycl_info().devices[i].cc) {
                min_compute_capability = ggml_sycl_info().devices[i].cc;
            }
            if (max_compute_capability < ggml_sycl_info().devices[i].cc) {
                max_compute_capability = ggml_sycl_info().devices[i].cc;
            }
        }
    }

    switch(type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
            return max_compute_capability >= VER_GEN9 ? 128 : 64;
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
            return 64;
        case GGML_TYPE_F16:
        case GGML_TYPE_F32:
            return 1;
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
            return max_compute_capability >= VER_GEN9 ? 128 : 64;
        case GGML_TYPE_IQ3_S:
            return max_compute_capability >= VER_GEN9 ? 128 : 64;
        case GGML_TYPE_Q6_K:
            return 64;
        default:
            GGML_ASSERT(false);
    }
}
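
// Quantized matrix-vector path: src1 has already been quantized to q8_1
// (src1_ddq_i), hence the requirement that ne10 is a multiple of QK8_1.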
inline void ggml_sycl_op_mul_mat_vec_q(
    ggml_backend_sycl_context & ctx,
    const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
    const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
    float *dst_dd_i, const int64_t row_low, const int64_t row_high,
    const int64_t src1_ncols, const int64_t src1_padded_row_size,
    const queue_ptr &stream) {

    const int64_t ne10 = src1->ne[0];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne00 = src0->ne[0];
    const int64_t row_diff = row_high - row_low;

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the kernel writes into
    const int64_t nrows_dst = id == ctx.device ? ne00 : row_diff;

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            mul_mat_vec_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q4_1:
            mul_mat_vec_q4_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_0:
            mul_mat_vec_q5_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_1:
            mul_mat_vec_q5_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q8_0:
            mul_mat_vec_q8_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q2_K:
            mul_mat_vec_q2_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q3_K:
            mul_mat_vec_q3_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q4_K:
            mul_mat_vec_q4_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_K:
            mul_mat_vec_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q6_K:
            mul_mat_vec_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ1_S:
            mul_mat_vec_iq1_s_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ1_M:
            mul_mat_vec_iq1_m_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ2_XXS:
            mul_mat_vec_iq2_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ2_XS:
            mul_mat_vec_iq2_xs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ2_S:
            mul_mat_vec_iq2_s_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ3_XXS:
            mul_mat_vec_iq3_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ3_S:
            mul_mat_vec_iq3_s_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ4_NL:
            mul_mat_vec_iq4_nl_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_IQ4_XS:
            mul_mat_vec_iq4_xs_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    (void) src1;
    (void) dst;
    (void) src1_ddf_i;
    (void) src1_ncols;
    (void) src1_padded_row_size;
}
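
// Dequantize-then-multiply matrix-vector path. With GGML_SYCL_F16 the f32
// src1 column is first converted to half for the simple quant types and f16
// (faster on some GPUs); the k-quant kernels below keep consuming f32.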
inline void ggml_sycl_op_dequantize_mul_mat_vec(
    ggml_backend_sycl_context & ctx,
    const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
    const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
    float *dst_dd_i, const int64_t row_low, const int64_t row_high,
    const int64_t src1_ncols, const int64_t src1_padded_row_size,
    const queue_ptr &stream) {

    const int64_t ne00 = src0->ne[0];
    const int64_t row_diff = row_high - row_low;

    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
#ifdef GGML_SYCL_F16
    ggml_sycl_pool_alloc<sycl::half> src1_dfloat_a(ctx.pool());
    sycl::half *src1_dfloat = nullptr; // dfloat == half

    bool src1_convert_f16 =
        src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
        src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
        src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;

    if (src1_convert_f16) {
        src1_dfloat = src1_dfloat_a.alloc(ne00);
        const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type);
        GGML_ASSERT(to_fp16_sycl != nullptr);
        to_fp16_sycl(src1_ddf_i, src1_dfloat, ne00, stream);
    }
#else
    const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion
#endif // GGML_SYCL_F16

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            dequantize_mul_mat_vec_q4_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q4_1:
            dequantize_mul_mat_vec_q4_1_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_0:
            dequantize_mul_mat_vec_q5_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_1:
            dequantize_mul_mat_vec_q5_1_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q8_0:
            dequantize_mul_mat_vec_q8_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q2_K:
            dequantize_mul_mat_vec_q2_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q3_K:
            dequantize_mul_mat_vec_q3_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q4_K:
            dequantize_mul_mat_vec_q4_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_K:
            dequantize_mul_mat_vec_q5_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q6_K:
            dequantize_mul_mat_vec_q6_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_F16:
            convert_mul_mat_vec_f16_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        default:
            printf("ggml_sycl_op_dequantize_mul_mat_vec unsupported GGML_TYPE %d\n", src0->type);
            GGML_ASSERT(false);
            break;
    }

    (void) src1;
    (void) dst;
    (void) src1_ddq_i;
    (void) src1_ncols;
    (void) src1_padded_row_size;
}
inline void ggml_sycl_op_mul_mat_sycl(
    ggml_backend_sycl_context & ctx,
    const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
    const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
    float *dst_dd_i, const int64_t row_low, const int64_t row_high,
    const int64_t src1_ncols, const int64_t src1_padded_row_size,
    const queue_ptr &stream) try {

    GGML_ASSERT(src0_dd_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_dd_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff = row_high - row_low;

    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // ldc == nrows of the matrix that the oneMKL gemm writes into
    int ldc = id == ctx.device ? ne0 : row_diff;

#ifdef GGML_SYCL_F16
    bool use_fp16 = true;  // TODO(Yu) SYCL capability check
#else
    bool use_fp16 = false;
#endif
    if ((src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
        use_fp16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1] &&
        dst->op_params[0] == GGML_PREC_DEFAULT) {
        // GGML_SYCL_DEBUG("ggml_sycl_op_mul_mat_sycl - fp16 path\n");
        ggml_sycl_pool_alloc<sycl::half> src0_as_f16(ctx.pool());
        if (src0->type != GGML_TYPE_F16) {
            const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src0->type);
            GGML_ASSERT(to_fp16_sycl != nullptr);
            size_t ne = row_diff * ne00;
            src0_as_f16.alloc(ne);
            to_fp16_sycl(src0_dd_i, src0_as_f16.get(), ne, stream);
        }
        const sycl::half *src0_ptr = src0->type == GGML_TYPE_F16
                                         ? (const sycl::half *)src0_dd_i
                                         : src0_as_f16.get();

        ggml_sycl_pool_alloc<sycl::half> src1_as_f16(ctx.pool());
        if (src1->type != GGML_TYPE_F16) {
            const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type);
            GGML_ASSERT(to_fp16_sycl != nullptr);
            size_t ne = src1_ncols * ne10;
            src1_as_f16.alloc(ne);
            to_fp16_sycl(src1_ddf_i, src1_as_f16.get(), ne, stream);
        }
        const sycl::half *src1_ptr = src1->type == GGML_TYPE_F16
                                         ? (const sycl::half *)src1->data + src1_padded_row_size
                                         : src1_as_f16.get();
        ggml_sycl_pool_alloc<sycl::half> dst_f16(ctx.pool(), row_diff * src1_ncols);

        const sycl::half alpha_f16 = 1.0f;
        const sycl::half beta_f16 = 0.0f;
        SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm(
            *stream, oneapi::mkl::transpose::trans,
            oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10,
            &alpha_f16, src0_ptr, dpct::library_data_t::real_half, ne00,
            src1_ptr, dpct::library_data_t::real_half, ne10, &beta_f16,
            dst_f16.get(), dpct::library_data_t::real_half, ldc,
            dpct::library_data_t::real_half)));
        const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16);
        to_fp32_sycl(dst_f16.get(), dst_dd_i, row_diff * src1_ncols, stream);
    }
    else {
        // GGML_SYCL_DEBUG("ggml_sycl_op_mul_mat_sycl - fp32 path\n");
        ggml_sycl_pool_alloc<float> src0_ddq_as_f32(ctx.pool());
        ggml_sycl_pool_alloc<float> src1_ddq_as_f32(ctx.pool());
        if (src0->type != GGML_TYPE_F32) {
            const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(src0->type);
            GGML_ASSERT(to_fp32_sycl != nullptr);
            src0_ddq_as_f32.alloc(row_diff * ne00);
            to_fp32_sycl(src0_dd_i, src0_ddq_as_f32.get(), row_diff * ne00, stream);
        }
        if (src1->type != GGML_TYPE_F32) {
            const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(src1->type);
            GGML_ASSERT(to_fp32_sycl != nullptr);
            src1_ddq_as_f32.alloc(src1_ncols * ne10);
            to_fp32_sycl(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols * ne10, stream);
        }
        const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get();
        const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get();

        const float alpha = 1.0f;
        const float beta = 0.0f;
        SYCL_CHECK(CHECK_TRY_ERROR(oneapi::mkl::blas::column_major::gemm(
            *stream, oneapi::mkl::transpose::trans,
            oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10,
            dpct::get_value(&alpha, *stream), src0_ddf_i, ne00,
            src1_ddf1_i, ne10, dpct::get_value(&beta, *stream),
            dst_dd_i, ldc)));
    }
    (void) dst;
    (void) src1_ddq_i;
    (void) src1_padded_row_size;
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
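
// RoPE parameters arrive packed in dst->op_params: integer fields (n_dims,
// mode, n_ctx_orig) in the leading slots, followed by the float tuning knobs
// (freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow)
// which are copied out with memcpy below.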
inline void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                              ggml_tensor *dst, const float *src0_dd,
                              const float *src1_dd, float *dst_dd,
                              const queue_ptr &main_stream) {
    const ggml_tensor * src2 = dst->src[2];

    GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
    GGML_ASSERT( dst->type == GGML_TYPE_F32 ||  dst->type == GGML_TYPE_F16);
    GGML_ASSERT(src0->type == dst->type);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t nrows = ggml_nrows(src0);

    //const int n_past      = ((int32_t *) dst->op_params)[0];
    const int n_dims        = ((int32_t *) dst->op_params)[1];
    const int mode          = ((int32_t *) dst->op_params)[2];
    //const int n_ctx       = ((int32_t *) dst->op_params)[3];
    const int n_ctx_orig    = ((int32_t *) dst->op_params)[4];

    // RoPE alteration for extended context
    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
    memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
    memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
    memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
    memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
    memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
    memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));

    const float * freq_factors = nullptr;
    const int32_t * pos = nullptr;
    if ((mode & 1) == 0) {
        GGML_ASSERT(src1->type == GGML_TYPE_I32);
        GGML_ASSERT(src1->ne[0] == ne2);
        pos = (const int32_t *) src1_dd;
    }

    const bool is_neox = mode & 2;

#pragma message("TODO: update rope NORM mode to match NEOX mode")
#pragma message("      https://github.com/ggerganov/llama.cpp/pull/7634")

    if (is_neox) {
        pos = (const int32_t *) src1_dd;

        if (src2 != nullptr) {
            freq_factors = (const float *) src2->data;
        }
    } else {
        GGML_ASSERT(src2 == nullptr && "TODO: freq_factors not implemented for !is_neox");
    }

    rope_corr_dims corr_dims;
    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims.v);

    // compute
    if (is_neox) {
        if (src0->type == GGML_TYPE_F32) {
            rope_neox_sycl(
                (const float *)src0_dd, (float *)dst_dd, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
                attn_factor, corr_dims, freq_factors, main_stream
            );
        } else if (src0->type == GGML_TYPE_F16) {
            rope_neox_sycl((const sycl::half *)src0_dd, (sycl::half *)dst_dd,
                           ne00, n_dims, nrows, pos, freq_scale, ne01,
                           freq_base, ext_factor, attn_factor, corr_dims,
                           freq_factors, main_stream);
        } else {
            GGML_ASSERT(false);
        }
    } else {
        if (src0->type == GGML_TYPE_F32) {
            rope_sycl(
                (const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
                attn_factor, corr_dims, main_stream
            );
        } else if (src0->type == GGML_TYPE_F16) {
            rope_sycl((const sycl::half *)src0_dd, (sycl::half *)dst_dd, ne00,
                      nrows, pos, freq_scale, ne01, freq_base, ext_factor,
                      attn_factor, corr_dims, main_stream);
        } else {
            GGML_ASSERT(false);
        }
    }

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
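
// Pool2D reads its configuration from dst->op_params: pooling op kind,
// kernel size (k0, k1), stride (s0, s1) and padding (p0, p1), in that order.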
static void ggml_sycl_op_pool2d(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const float *src0_dd, const float *src1_dd,
                                float *dst_dd, const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
    const int k0 = opts[1];
    const int k1 = opts[2];
    const int s0 = opts[3];
    const int s1 = opts[4];
    const int p0 = opts[5];
    const int p1 = opts[6];

    const int64_t IH = src0->ne[1];
    const int64_t IW = src0->ne[0];

    const int64_t N = dst->ne[3];
    const int64_t OC = dst->ne[2];
    const int64_t OH = dst->ne[1];
    const int64_t OW = dst->ne[0];

    const int parallel_elements = N * OC * OH * OW;
    const int num_blocks = (parallel_elements + SYCL_POOL2D_BLOCK_SIZE - 1) / SYCL_POOL2D_BLOCK_SIZE;
    sycl::range<3> block_nums(1, 1, num_blocks);
    // note: the launch previously used SYCL_IM2COL_BLOCK_SIZE while num_blocks
    // was computed with SYCL_POOL2D_BLOCK_SIZE; the two are unified here
    main_stream->parallel_for(
        sycl::nd_range<3>(block_nums *
                              sycl::range<3>(1, 1, SYCL_POOL2D_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_POOL2D_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
            pool2d_nchw_kernel(IH, IW, OH, OW, k1, k0, s1, s0, p1, p0,
                               parallel_elements, src0_dd, dst_dd, op,
                               item_ct1);
        });

    (void) src1;
    (void) src1_dd;
}
inline void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const float *src0_dd, const float *src1_dd,
                                float *dst_dd,
                                const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];

    const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;

    const int64_t IC = src1->ne[is_2D ? 2 : 1];
    const int64_t IH = is_2D ? src1->ne[1] : 1;
    const int64_t IW = src1->ne[0];

    const int64_t KH = is_2D ? src0->ne[1] : 1;
    const int64_t KW = src0->ne[0];

    const int64_t OH = is_2D ? dst->ne[2] : 1;
    const int64_t OW = dst->ne[1];

    const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32

    if (dst->type == GGML_TYPE_F16) {
        im2col_sycl(src1_dd, (sycl::half *)dst_dd, IW, IH, OW, OH, KW, KH, IC, delta_offset, s0, s1, p0, p1, d0, d1, main_stream);
    } else {
        im2col_sycl(src1_dd, (float *)dst_dd, IW, IH, OW, OH, KW, KH, IC, delta_offset, s0, s1, p0, p1, d0, d1, main_stream);
    }

    (void) src0;
    (void) src0_dd;
}
inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                  const ggml_tensor *src1, ggml_tensor *dst,
                                  const float *src0_dd, const float *src1_dd,
                                  float *dst_dd,
                                  const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_argsort(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                 const ggml_tensor *src1, ggml_tensor *dst,
                                 const float *src0_dd, const float *src1_dd,
                                 float *dst_dd,
                                 const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_I32);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];

    argsort_f32_i32_sycl(src0_dd, (int *)dst_dd, ncols, nrows, order, main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_diag_mask_inf(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                       const ggml_tensor *src1,
                                       ggml_tensor *dst, const float *src0_dd,
                                       const float *src1_dd, float *dst_dd,
                                       const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int nrows0 = ggml_nrows(src0);

    const int n_past = ((int32_t *) dst->op_params)[0];

    diag_mask_inf_f32_sycl(src0_dd, dst_dd, ne00, nrows0, ne01, n_past, main_stream);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
inline void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                  const ggml_tensor *src1, ggml_tensor *dst,
                                  const float *src0_dd, const float *src1_dd,
                                  float *dst_dd,
                                  const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 support")
#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
    GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional

    const int64_t ne00 = src0->ne[0];
    const int64_t nrows_x = ggml_nrows(src0);
    const int64_t nrows_y = src0->ne[1];

    float scale = 1.0f;
    float max_bias = 0.0f;

    memcpy(&scale, dst->op_params + 0, sizeof(float));
    memcpy(&max_bias, dst->op_params + 1, sizeof(float));

    soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00,
                      nrows_x, nrows_y, scale, max_bias, main_stream);
}
inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                               ggml_tensor *dst, const float *src0_dd,
                               const float *src1_dd, float *dst_dd,
                               const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    float scale;
    memcpy(&scale, dst->op_params, sizeof(float));

    scale_f32_sycl(src0_dd, dst_dd, scale, ggml_nelements(src0), main_stream);
    /*
    DPCT1010:87: SYCL uses exceptions to report errors and does not use the
    error codes. The call was replaced with 0. You need to rewrite this code.
    */
    SYCL_CHECK(0);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}

inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                               ggml_tensor *dst, const float *src0_dd,
                               const float *src1_dd, float *dst_dd,
                               const queue_ptr &main_stream) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    float min;
    float max;
    memcpy(&min, dst->op_params, sizeof(float));
    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

    clamp_f32_sycl(src0_dd, dst_dd, min, max, ggml_nelements(src0), main_stream);
    /*
    DPCT1010:88: SYCL uses exceptions to report errors and does not use the
    error codes. The call was replaced with 0. You need to rewrite this code.
    */
    SYCL_CHECK(0);

    (void) src1;
    (void) dst;
    (void) src1_dd;
}
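
// Generic launcher for single-device ops: tensor data is assumed to already
// reside on the current device, so this only selects the device and stream
// and invokes op with the raw data pointers.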
static void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                 const ggml_tensor *src1, ggml_tensor *dst,
                                 const ggml_sycl_op_flatten_t op) try {
    const int64_t nrows0 = ggml_nrows(src0);

    const bool use_src1 = src1 != nullptr;
    const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1;

    GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
    GGML_ASSERT(              dst->backend != GGML_BACKEND_TYPE_GPU_SPLIT);

    ggml_tensor_extra_gpu * src0_extra =            (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
    ggml_tensor_extra_gpu * dst_extra  =            (ggml_tensor_extra_gpu *)  dst->extra;

    // dd = data device
    float * src0_ddf = (float *) src0->data;
    float * src1_ddf = use_src1 ? (float *) src1->data : nullptr;
    float *  dst_ddf = (float *)  dst->data;

    ggml_sycl_pool_alloc<float> src0_f(ctx.pool());
    ggml_sycl_pool_alloc<float> src1_f(ctx.pool());
    ggml_sycl_pool_alloc<float>  dst_f(ctx.pool());

    ggml_sycl_set_device(ctx.device);
    queue_ptr main_stream = ctx.stream();
    // GGML_SYCL_DEBUG("ctx.device=%d, main_stream=%p src0_on_device=%d, src1_on_device=%d, dst_on_device=%d\n",
    //     ctx.device, main_stream, src0_on_device, src1_on_device, dst_on_device);

    // do the computation
    op(ctx, src0, src1, dst, src0_ddf, src1_ddf, dst_ddf, main_stream);
    // print_ggml_tensor("tensor", dst);
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
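
// Peer access is only (conceptually) enabled for small batches
// (n_tokens <= GGML_SYCL_PEER_MAX_BATCH_SIZE). The per-device
// enable/disable calls from the CUDA original remain commented out below,
// so for now this function only tracks the intended state.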
static void ggml_sycl_set_peer_access(const int n_tokens, int main_device) {
    static bool peer_access_enabled = false;

    const bool enable_peer_access = n_tokens <= GGML_SYCL_PEER_MAX_BATCH_SIZE;

    if (peer_access_enabled == enable_peer_access) {
        return;
    }

#ifdef NDEBUG
    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        SYCL_CHECK(ggml_sycl_set_device(i));
    }

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        SYCL_CHECK(ggml_sycl_set_device(i));

        for (int id_other = 0; id_other < ggml_sycl_info().device_count; ++id_other) {
            if (i == id_other) {
                continue;
            }
            if (i != main_device && id_other != main_device) {
                continue;
            }

            // int can_access_peer;
            // SYCL_CHECK(syclDeviceCanAccessPeer(&can_access_peer, id, id_other));
            // if (can_access_peer) {
            //     if (enable_peer_access) {
            //         SYCL_CHECK(syclDeviceEnablePeerAccess(id_other, 0));
            //     } else {
            //         SYCL_CHECK(syclDeviceDisablePeerAccess(id_other));
            //     }
            // }
        }
    }
#endif // NDEBUG

    peer_access_enabled = enable_peer_access;
}
struct ggml_backend_sycl_split_buffer_type_context {
    std::array<float, GGML_SYCL_MAX_DEVICES> tensor_split;
};
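
// tensor_split holds cumulative per-device row fractions. A worked example of
// the boundary computation in ggml_sycl_op_mul_mat below (illustrative values
// only): with two devices, tensor_split = {0.0f, 0.6f} and ne01 = 4096 rows,
// device 1 starts at floor(4096 * 0.6) = 2457, which is then rounded down to
// a multiple of get_row_rounding() (e.g. 2432 for a rounding of 32); device 0
// covers the rows below that boundary.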
static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                 const ggml_tensor *src1, ggml_tensor *dst,
                                 ggml_sycl_op_mul_mat_t op,
                                 const bool convert_src1_to_q8_1) try {

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);

    const int64_t nrows1 = ggml_nrows(src1);

    GGML_ASSERT(ne03 == ne13);

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    const int64_t nb2 = dst->nb[2];
    const int64_t nb3 = dst->nb[3];

    GGML_ASSERT(dst->backend  != GGML_BACKEND_TYPE_GPU_SPLIT);
    GGML_ASSERT(src1->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
    GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1));

    GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0);

    const int64_t i02_divisor = ne12 / ne02;

    const size_t src0_ts = ggml_type_size(src0->type);
    const size_t src0_bs = ggml_blck_size(src0->type);
    const size_t q8_1_ts = sizeof(block_q8_1);
    const size_t q8_1_bs = QK8_1;

    ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
    ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
    ggml_tensor_extra_gpu *  dst_extra = (ggml_tensor_extra_gpu *)  dst->extra;

    const bool src0_is_contiguous = ggml_is_contiguous(src0);
    const bool src1_is_contiguous = ggml_is_contiguous(src1);

    int64_t src1_padded_col_size = GGML_PAD(ne10, MATRIX_ROW_PADDING);

    const bool split = src0->backend == GGML_BACKEND_TYPE_GPU_SPLIT;
    GGML_ASSERT(!(split && ne02 > 1));
    GGML_ASSERT(!(split && ne03 > 1));
    GGML_ASSERT(!(split && ne02 < ne12));

    std::array<float, GGML_SYCL_MAX_DEVICES> tensor_split;
    if (split) {
        // TODO: check that src0->buffer->buft is a split buffer type, replace GGML_BACKEND_TYPE_GPU_SPLIT check
        // GGML_ASSERT(src0->buffer != nullptr && src0->buffer->buft == ...);
        ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *) src0->buffer->buft->context;
        tensor_split = buft_ctx->tensor_split;
    }

    struct dev_data {
        ggml_sycl_pool_alloc<char>  src0_dd_alloc;
        ggml_sycl_pool_alloc<float> src1_ddf_alloc;
        ggml_sycl_pool_alloc<char>  src1_ddq_alloc;
        ggml_sycl_pool_alloc<float>   dst_dd_alloc;

        char  *src0_dd  = nullptr;
        float *src1_ddf = nullptr; // float
        char  *src1_ddq = nullptr; // q8_1
        float *dst_dd   = nullptr;

        int64_t row_low;
        int64_t row_high;
    };
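
    // Per-device staging buffers: src0_dd holds the (possibly quantized)
    // weights, src1_ddf the f32 activations, src1_ddq an optional q8_1
    // quantized copy of src1, and dst_dd the (partial) results. The pool
    // allocations are only made when a tensor does not already reside on the
    // device in a usable layout (see the setup loop below).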
    dev_data dev[GGML_SYCL_MAX_DEVICES];

    int used_devices = 0;
    queue_ptr main_stream = ctx.stream();

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        // by default, use all rows
        dev[i].row_low  = 0;
        dev[i].row_high = ne01;

        // for multi GPU, get the row boundaries from tensor split
        // and round to mul_mat_q tile sizes
        if (split) {
            const int64_t rounding = get_row_rounding(src0->type, tensor_split);

            if (i != 0) {
                dev[i].row_low = ne01*tensor_split[i];
                if (dev[i].row_low < ne01) {
                    dev[i].row_low -= dev[i].row_low % rounding;
                }
            }

            if (i != ggml_sycl_info().device_count - 1) {
                dev[i].row_high = ne01*tensor_split[i + 1];
                if (dev[i].row_high < ne01) {
                    dev[i].row_high -= dev[i].row_high % rounding;
                }
            }
        }
    }

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        if ((!split && i != ctx.device) || dev[i].row_low == dev[i].row_high) {
            continue;
        }

        used_devices++;

        const bool src1_on_device = i == ctx.device;
        const bool  dst_on_device = i == ctx.device;

        ggml_sycl_set_device(i);
        queue_ptr stream = ctx.stream(i, 0);

        if (src0_is_contiguous) {
            dev[i].src0_dd = (char *) src0->data;
        } else {
            dev[i].src0_dd = dev[i].src0_dd_alloc.alloc(ctx.pool(i), ggml_nbytes(src0));
        }

        if (src1_on_device && src1_is_contiguous) {
            dev[i].src1_ddf = (float *) src1->data;
        } else {
            dev[i].src1_ddf = dev[i].src1_ddf_alloc.alloc(ctx.pool(i), ggml_nelements(src1));
        }

        if (convert_src1_to_q8_1) {
            dev[i].src1_ddq = dev[i].src1_ddq_alloc.alloc(ctx.pool(i), nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs);

            if (src1_on_device && src1_is_contiguous) {
                quantize_row_q8_1_sycl(dev[i].src1_ddf, dev[i].src1_ddq, ne10, nrows1, src1_padded_col_size, stream);
                /*
                DPCT1010:90: SYCL uses exceptions to report errors and does not
                use the error codes. The call was replaced with 0. You need to
                rewrite this code.
                */
                SYCL_CHECK(0);
            }
        }

        if (dst_on_device) {
            dev[i].dst_dd = (float *) dst->data;
        } else {
            const size_t size_dst_ddf = split ? (dev[i].row_high - dev[i].row_low)*ne1 : ggml_nelements(dst);
            dev[i].dst_dd = dev[i].dst_dd_alloc.alloc(ctx.pool(i), size_dst_ddf);
        }
    }

    // if multiple devices are used they need to wait for the main device
    // here an event is recorded that signals that the main device has finished calculating the input data
    if (split && used_devices > 1) {
        ggml_sycl_set_device(ctx.device);
        /*
        DPCT1024:91: The original code returned the error code that was further
        consumed by the program logic. This original code was replaced with 0.
        You may need to rewrite the program logic consuming the error code.
        */
        SYCL_CHECK(CHECK_TRY_ERROR(
            *src0_extra->events[ctx.device][0] =
                ctx.stream()->ext_oneapi_submit_barrier()));
    }
    const int64_t src1_col_stride = split && used_devices > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11;
    for (int64_t src1_col_0 = 0; src1_col_0 < ne11; src1_col_0 += src1_col_stride) {
        const int64_t is = split ? (src1_col_0/src1_col_stride) % GGML_SYCL_MAX_STREAMS : 0;
        const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? ne11 - src1_col_0 : src1_col_stride;

        for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
            if ((!split && i != ctx.device) || dev[i].row_low == dev[i].row_high) {
                continue;
            }

            const bool src1_on_device = i == ctx.device;
            const bool  dst_on_device = i == ctx.device;
            const int64_t row_diff = dev[i].row_high - dev[i].row_low;

            ggml_sycl_set_device(i);
            queue_ptr stream = ctx.stream(i, is);

            // wait for main GPU data if necessary
            if (split && (i != ctx.device || is != 0)) {
                /*
                DPCT1009:163: SYCL uses exceptions to report errors and does not
                use the error codes. The original code was commented out and a
                warning string was inserted. You need to rewrite this code.
                */
                SYCL_CHECK(CHECK_TRY_ERROR(stream->ext_oneapi_submit_barrier(
                    {*src0_extra->events[ctx.device][0]})));
            }

            for (int64_t i0 = 0; i0 < ne13*ne12; ++i0) {
                const int64_t i03 = i0 / ne12;
                const int64_t i02 = i0 % ne12;

                const size_t src1_ddq_i_offset = (i0*ne11 + src1_col_0) * src1_padded_col_size*q8_1_ts/q8_1_bs;

                // for split tensors the data begins at i0 == i0_offset_low
                char  *  src0_dd_i =  dev[i].src0_dd + (i0/i02_divisor) * (ne01*ne00*src0_ts)/src0_bs;
                float * src1_ddf_i = dev[i].src1_ddf + (i0*ne11 + src1_col_0) * ne10;
                char  * src1_ddq_i = dev[i].src1_ddq +  src1_ddq_i_offset;
                float *   dst_dd_i =   dev[i].dst_dd + (i0*ne1  + src1_col_0) * (dst_on_device ? ne0 : row_diff);

                // the main device memory buffer can be on VRAM scratch, with space for all partial results
                // in that case an offset on dst_ddf_i is needed
                if (i == ctx.device) {
                    dst_dd_i += dev[i].row_low; // offset is 0 if no tensor split
                }

                // copy src0, src1 to device if necessary
                if (src1_is_contiguous) {
                    if (i != ctx.device) {
                        if (convert_src1_to_q8_1) {
                            char * src1_ddq_i_source = dev[ctx.device].src1_ddq + src1_ddq_i_offset;
                            SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(
                                src1_ddq_i, src1_ddq_i_source,
                                src1_ncols * src1_padded_col_size * q8_1_ts /
                                    q8_1_bs).wait()));
                        } else {
                            float * src1_ddf_i_source = (float *) src1_extra->data_device[ctx.device];
                            src1_ddf_i_source += (i0*ne11 + src1_col_0) * ne10;

                            SYCL_CHECK(CHECK_TRY_ERROR(dev2dev_memcpy(*stream, *main_stream,
                                src1_ddf_i, src1_ddf_i_source,
                                src1_ncols * ne10 * sizeof(float))));
                        }
                    }
                } else if (src1_on_device && !src1_is_contiguous) {
                    SYCL_CHECK(ggml_sycl_cpy_tensor_2d(
                        src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream));
                } else {
                    GGML_ASSERT(false);
                }

                if (convert_src1_to_q8_1 && !src1_is_contiguous) {
                    quantize_row_q8_1_sycl(src1_ddf_i, src1_ddq_i, ne10, src1_ncols, src1_padded_col_size, stream);
                    /*
                    DPCT1010:92: SYCL uses exceptions to report errors and does
                    not use the error codes. The call was replaced with 0. You
                    need to rewrite this code.
                    */
                    SYCL_CHECK(0);
                }

                if (src1_col_0 == 0 && !src0_is_contiguous && i02 % i02_divisor == 0) {
                    SYCL_CHECK(ggml_sycl_cpy_tensor_2d(src0_dd_i, src0, i03, i02/i02_divisor, dev[i].row_low, dev[i].row_high, stream));
                }
                if (src1->type == GGML_TYPE_F16) {
                    src1_padded_col_size = (i0 * ne11 + src1_col_0) * ne10;
                }

                // do the computation
                SYCL_CHECK(CHECK_TRY_ERROR(op(ctx, src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i,
                    dev[i].row_low, dev[i].row_high, src1_ncols, src1_padded_col_size, stream)));
                /*
                DPCT1010:93: SYCL uses exceptions to report errors and does not
                use the error codes. The call was replaced with 0. You need to
                rewrite this code.
                */
                SYCL_CHECK(0);

                // copy dst to host or other device if necessary
                if (!dst_on_device) {
                    void * dst_off_device = dst->data;
                    if (split) {
                        // src0 = weight matrix is saved as a transposed matrix for better memory layout.
                        // dst is NOT transposed.
                        // The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
                        // Instead they need to be copied to the correct slice in ne0 = dst row index.
                        // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
                        float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
                        GGML_ASSERT(dst->nb[1] == ne0*sizeof(float));
                        dhf_dst_i += src1_col_0*ne0 + dev[i].row_low;

                        SYCL_CHECK(CHECK_TRY_ERROR(dpct::async_dpct_memcpy(
                            dhf_dst_i, ne0 * sizeof(float), dst_dd_i,
                            row_diff * sizeof(float), row_diff * sizeof(float),
                            src1_ncols, dpct::device_to_device, *stream)));
                    } else {
                        float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
                        GGML_ASSERT(dst->nb[1] == ne0*sizeof(float));
                        dhf_dst_i += src1_col_0*ne0;
                        SYCL_CHECK(CHECK_TRY_ERROR(
                            stream->memcpy(dhf_dst_i, dst_dd_i,
                                           src1_ncols * ne0 * sizeof(float)).wait()));
                    }
                }

                // add event for the main device to wait on until other device is done
                if (split && (i != ctx.device || is != 0)) {
                    /*
                    DPCT1024:94: The original code returned the error code that
                    was further consumed by the program logic. This original
                    code was replaced with 0. You may need to rewrite the
                    program logic consuming the error code.
                    */
                    SYCL_CHECK(CHECK_TRY_ERROR(
                        *src0_extra->events[i][is] =
                            stream->ext_oneapi_submit_barrier()));
                }
            }
        }
    }

    // main device waits for all other devices to be finished
    if (split && ggml_sycl_info().device_count > 1) {
        int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE;
        is_max = is_max <= GGML_SYCL_MAX_STREAMS ? is_max : GGML_SYCL_MAX_STREAMS;

        ggml_sycl_set_device(ctx.device);
        for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
            if (dev[i].row_low == dev[i].row_high) {
                continue;
            }
            for (int64_t is = 0; is < is_max; ++is) {
                SYCL_CHECK(CHECK_TRY_ERROR(
                    ctx.stream()->ext_oneapi_submit_barrier(
                        {*src0_extra->events[i][is]})));
            }
        }
    }
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
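
// Thin wrappers: each of the following ops is dispatched through
// ggml_sycl_op_flatten, which resolves the device pointers and the stream
// before invoking the per-op kernel launcher.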
static void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_repeat);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_get_rows(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_get_rows);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_add(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_add);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_acc(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_acc);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_mul(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_mul);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_div(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_div);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_gelu);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_silu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_silu);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_gelu_quick);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_tanh);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_relu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_relu);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_hardsigmoid);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_hardswish);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_leaky_relu);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sqr);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_norm(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_norm);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_group_norm(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_group_norm);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_concat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_concat);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_upscale);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_pad(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_pad);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}

static void ggml_sycl_rms_norm(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_SYCL_DEBUG("call %s\n", __func__);
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_rms_norm);
    GGML_SYCL_DEBUG("call %s done\n", __func__);
}
static void ggml_sycl_mul_mat_vec_p021(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                       const ggml_tensor *src1,
                                       ggml_tensor *dst) try {
    GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
    GGML_ASSERT(src0->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
    GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
    GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // 0213 permutation
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];

    const int64_t ne12 = src1->ne[2];

    SYCL_CHECK(ggml_sycl_set_device(ctx.device));
    queue_ptr main_stream = ctx.stream();

    void  * src0_ddq = src0->data;
    float * src1_ddf = (float *) src1->data;
    float *  dst_ddf = (float *)  dst->data;

    ggml_mul_mat_p021_f16_f32_sycl(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream);
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
static void ggml_sycl_mul_mat_vec_nc(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                     const ggml_tensor *src1,
                                     ggml_tensor *dst) try {
    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];

    const int64_t nb01 = src0->nb[1];
    const int64_t nb02 = src0->nb[2];

    const int64_t ne12 = src1->ne[2];

    SYCL_CHECK(ggml_sycl_set_device(ctx.device));
    queue_ptr main_stream = ctx.stream();

    void  * src0_ddq = src0->data;
    float * src1_ddf = (float *) src1->data;
    float *  dst_ddf = (float *)  dst->data;

    const int64_t row_stride_x     = nb01 / sizeof(sycl::half);
    const int64_t channel_stride_x = nb02 / sizeof(sycl::half);

    ggml_mul_mat_vec_nc_f16_f32_sycl(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, main_stream);
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
static void k_compute_batched_ptrs(const sycl::half *src0_as_f16,
                                   const sycl::half *src1_as_f16, char *dst,
                                   const void **ptrs_src, void **ptrs_dst,
                                   int64_t ne12, int64_t ne13, int64_t ne23,
                                   size_t nb02, size_t nb03, size_t nb12,
                                   size_t nb13, size_t nbd2, size_t nbd3,
                                   int64_t r2, int64_t r3,
                                   const sycl::nd_item<3> &item_ct1) {
    int64_t i13 = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                  item_ct1.get_local_id(2);
    int64_t i12 = item_ct1.get_group(1) * item_ct1.get_local_range(1) +
                  item_ct1.get_local_id(1);

    if (i13 >= ne13 || i12 >= ne12) {
        return;
    }

    int64_t i03 = i13 / r3;
    int64_t i02 = i12 / r2;

    ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03;
    ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12 + i13*nb13;
    ptrs_dst[0*ne23 + i12 + i13*ne12] = (      char *)         dst + i12*nbd2 + i13*nbd3;
}
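
// Broadcast mapping used by the kernel above: with r2 = ne12/ne02 and
// r3 = ne13/ne03, each dst batch (i12, i13) reads src0 batch
// (i02, i03) = (i12/r2, i13/r3). For example (illustrative values), with
// ne02 = 1 and ne12 = 8 (so r2 = 8), all eight dst batches share the single
// src0 matrix at i02 = 0.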
static void ggml_sycl_mul_mat_batched_sycl(ggml_backend_sycl_context & ctx,
                                           const ggml_tensor *src0,
                                           const ggml_tensor *src1,
                                           ggml_tensor *dst) try {
    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(src0->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
    GGML_ASSERT(src0->type == GGML_TYPE_F16);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t ne_dst = ggml_nelements(dst);

    SYCL_CHECK(ggml_sycl_set_device(ctx.device));
    queue_ptr main_stream = ctx.stream();

    bool no_mixed_dtypes = main_stream->get_backend() == sycl::backend::ext_oneapi_cuda ||
                           main_stream->get_backend() == sycl::backend::ext_oneapi_hip;

    void * src0_ddq = src0->data;
    sycl::half *src0_as_f16 = (sycl::half *)src0_ddq;
    float * src1_ddf = (float *) src1->data;
    float *  dst_ddf = (float *)  dst->data;

    // convert src1 to fp16
    ggml_sycl_pool_alloc<sycl::half> src1_f16_alloc(ctx.pool());
    if (src1->type != GGML_TYPE_F16) {
        const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type);
        const int64_t ne_src1 = ggml_nelements(src1);
        src1_f16_alloc.alloc(ne_src1);
        GGML_ASSERT(to_fp16_sycl != nullptr);
        to_fp16_sycl(src1_ddf, src1_f16_alloc.get(), ne_src1, main_stream);
    }
    sycl::half *src1_f16 = src1->type == GGML_TYPE_F16 ? (sycl::half *)src1_ddf
                                                       : src1_f16_alloc.get();

    ggml_sycl_pool_alloc<sycl::half> dst_f16(ctx.pool());
    char * dst_t;

    dpct::library_data_t cu_compute_type = dpct::library_data_t::real_float;
    dpct::library_data_t cu_data_type    = dpct::library_data_t::real_float;
    if (no_mixed_dtypes) {
        cu_compute_type = dpct::library_data_t::real_half;
        cu_data_type    = dpct::library_data_t::real_half;
    }

    // dst strides
    size_t nbd2 = dst->nb[2];
    size_t nbd3 = dst->nb[3];

    const float alpha_f32 = 1.0f;
    const float beta_f32  = 0.0f;

    const sycl::half alpha_f16 = 1.0f;
    const sycl::half beta_f16  = 0.0f;

    const void * alpha = &alpha_f32;
    const void * beta  = &beta_f32;
    if (no_mixed_dtypes) {
        alpha = &alpha_f16;
        beta  = &beta_f16;
    }

    // TODO: Re-enable the (dst->op_params[0] != GGML_PREC_DEFAULT) pathway
    // once open-source oneMKL supports the half, half, float, float datatype
    // combination
    dst_t = (char *) dst_ddf;
    if (no_mixed_dtypes) {
        dst_t = (char *) dst_f16.alloc(ne_dst);

        nbd2 /= sizeof(float) / sizeof(sycl::half);
        nbd3 /= sizeof(float) / sizeof(sycl::half);
    }

    GGML_ASSERT(ne12 % ne02 == 0);
    GGML_ASSERT(ne13 % ne03 == 0);

    // broadcast factors
    const int64_t r2 = ne12/ne02;
    const int64_t r3 = ne13/ne03;

    if (r2 == 1 && r3 == 1 && ggml_is_contiguous_2(src0) && ggml_is_contiguous_2(src1)) {
        // there is no broadcast and src0, src1 are contiguous across dims 2, 3
        SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch(
            *main_stream, oneapi::mkl::transpose::trans,
            oneapi::mkl::transpose::nontrans, ne01, ne11, ne10, alpha,
            (const char *)src0_as_f16, dpct::library_data_t::real_half,
            nb01 / nb00, nb02 / nb00,
            (const char *)src1_f16, dpct::library_data_t::real_half,
            nb11 / nb10, nb12 / nb10, beta,
            (char *)dst_t, cu_data_type, ne01, nb2 / nb0,
            ne12 * ne13, cu_compute_type)));
    } else {
        const int ne23 = ne12*ne13;

        ggml_sycl_pool_alloc<const void *> ptrs_src(ctx.pool(), 2*ne23);
        ggml_sycl_pool_alloc<      void *> ptrs_dst(ctx.pool(), 1*ne23);

        sycl::range<3> block_dims(1, ne12, ne13);
        /*
        DPCT1049:47: The work-group size passed to the SYCL kernel may exceed
        the limit. To get the device limit, query
        info::device::max_work_group_size. Adjust the work-group size if needed.
        */
        {
            dpct::has_capability_or_fail(main_stream->get_device(),
                                         {sycl::aspect::fp16});

            main_stream->submit([&](sycl::handler &cgh) {
                const void **ptrs_src_get = ptrs_src.get();
                void **ptrs_dst_get = ptrs_dst.get();
                size_t nb12_scaled = src1->type == GGML_TYPE_F16 ? nb12 : nb12 / 2;
                size_t nb13_scaled = src1->type == GGML_TYPE_F16 ? nb13 : nb13 / 2;
                cgh.parallel_for(sycl::nd_range<3>(block_dims, block_dims),
                                 [=](sycl::nd_item<3> item_ct1) {
                                     k_compute_batched_ptrs(
                                         src0_as_f16, src1_f16,
                                         dst_t, ptrs_src_get,
                                         ptrs_dst_get, ne12, ne13, ne23,
                                         nb02, nb03, nb12_scaled, nb13_scaled,
                                         nbd2, nbd3, r2, r3, item_ct1);
                                 });
            });
        }
        SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch(
            *main_stream, oneapi::mkl::transpose::trans,
            oneapi::mkl::transpose::nontrans, ne01, ne11, ne10, alpha,
            (const void **)(ptrs_src.get() + 0 * ne23),
            dpct::library_data_t::real_half, nb01 / nb00,
            (const void **)(ptrs_src.get() + 1 * ne23),
            dpct::library_data_t::real_half, nb11 / nb10, beta,
            (void **)(ptrs_dst.get() + 0 * ne23), cu_data_type, ne01, ne23,
            cu_compute_type)));
    }

    if (no_mixed_dtypes) {
        const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16);
        to_fp32_sycl(dst_f16.get(), dst_ddf, ne_dst, main_stream);
    }
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
inline bool ggml_sycl_supports_mmq(enum ggml_type type) {
    // TODO: accuracy issues in MMQ
    (void) type;
    return false;
}
bool ggml_sycl_supports_dmmv(enum ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_F16:
            return true;
        default:
            return false;
    }
}
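
// Dispatch policy for matrix multiplication (see the body below): the
// permuted-f16 single-batch paths (p021 / nc) are tried first, then the
// batched f16 GEMM, then the custom quantized kernels (dequantize-mul-mat-vec,
// mul-mat-vec-q, mul-mat-q), falling back to ggml_sycl_op_mul_mat_sycl.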
static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const bool split = ggml_backend_buffer_is_sycl_split(src0->buffer);
    int64_t min_compute_capability = INT_MAX;

    if (split) {
        ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *) src0->buffer->buft->context;
        auto & tensor_split = buft_ctx->tensor_split;
        for (int id = 0; id < ggml_sycl_info().device_count; ++id) {
            // skip devices that are not going to do any work:
            if (tensor_split[id] >= (id + 1 < ggml_sycl_info().device_count ? tensor_split[id + 1] : 1.0f)) {
                continue;
            }

            if (min_compute_capability > ggml_sycl_info().devices[id].cc) {
                min_compute_capability = ggml_sycl_info().devices[id].cc;
            }
        }
    } else {
        min_compute_capability = ggml_sycl_info().devices[ctx.device].cc;
    }

    // check data types and tensor shapes for custom matrix multiplication kernels:
    bool use_dequantize_mul_mat_vec = ggml_sycl_supports_dmmv(src0->type)
        && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
        && src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;

    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
        && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
        && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;

    bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
        && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;

    // mmvq and mmq need the __dp4a instruction which is available for gen12+
    // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
    use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
#ifdef SYCL_USE_XMX
    use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
#endif // SYCL_USE_XMX

    if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
        // KQ single-batch
        ggml_sycl_mul_mat_vec_p021(ctx, src0, src1, dst);
    } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
        // KQV single-batch
        ggml_sycl_mul_mat_vec_nc(ctx, src0, src1, dst);
    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
        // KQ + KQV multi-batch
        ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst);
    } else if (use_dequantize_mul_mat_vec) {
        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec, false);
    } else if (use_mul_mat_vec_q) {
        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_vec_q, true);
    } else if (use_mul_mat_q) {
        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_q, true);
    } else {
        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
    }
}
struct mmid_row_mapping {
    int32_t i1;
    int32_t i2;
};
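
// For MUL_MAT_ID (expert routing), the rows of src1 selected by the ids
// tensor are gathered into a contiguous buffer per expert
// (k_copy_src1_to_contiguous), multiplied, and scattered back
// (k_copy_dst_from_contiguous). mmid_row_mapping records, for each gathered
// row, its original position: i1 = the id slot within the token, i2 = the
// token index.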
__dpct_inline__ static void k_copy_src1_to_contiguous(
    const char *__restrict__ src1_original, char *__restrict__ src1_contiguous,
    int *__restrict__ cur_src1_row, mmid_row_mapping *__restrict__ row_mapping,
    const char *__restrict ids, int64_t i02, size_t ids_nb1, size_t ids_nb0,
    int64_t ne11, int64_t ne10, size_t nb11, size_t nb12,
    const sycl::nd_item<3> &item_ct1, int &src1_row) {
    int32_t iid1 = item_ct1.get_group(2);
    int32_t id   = item_ct1.get_group(1);

    const int32_t row_id_i = *(const int32_t *) (ids + iid1*ids_nb1 + id*ids_nb0);

    if (row_id_i != i02) {
        return;
    }

    const int64_t i11 = id % ne11;
    const int64_t i12 = iid1;

    if (item_ct1.get_local_id(2) == 0) {
        src1_row =
            dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
                cur_src1_row, 1);
        row_mapping[src1_row] = {id, iid1};
    }
    /*
    DPCT1065:194: Consider replacing sycl::nd_item::barrier() with
    sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
    performance if there is no access to global memory.
    */
    item_ct1.barrier();

    const float * src1_row_original = (const float *)(src1_original + i11*nb11 + i12*nb12);
    float * src1_row_contiguous = (float *)(src1_contiguous + src1_row*nb11);

#pragma unroll
    for (int i = item_ct1.get_local_id(2); i < ne10;
         i += item_ct1.get_local_range(2)) {
        src1_row_contiguous[i] = src1_row_original[i];
    }
}
__dpct_inline__ static void k_copy_dst_from_contiguous(
    char *__restrict__ dst_original, const char *__restrict__ dst_contiguous,
    const mmid_row_mapping *__restrict__ row_mapping, int64_t ne0, size_t nb1,
    size_t nb2, const sycl::nd_item<3> &item_ct1) {
    int32_t i = item_ct1.get_group(2);

    const int32_t i1 = row_mapping[i].i1;
    const int32_t i2 = row_mapping[i].i2;

    const float * dst_row_contiguous = (const float *)(dst_contiguous + i*nb1);
    float * dst_row_original = (float *)(dst_original + i1*nb1 + i2*nb2);

#pragma unroll
    for (int j = item_ct1.get_local_id(2); j < ne0;
         j += item_ct1.get_local_range(2)) {
        dst_row_original[j] = dst_row_contiguous[j];
    }
}
static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                 const ggml_tensor *src1,
                                 ggml_tensor *dst) try {
    GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer) && "mul_mat_id does not support split buffers");

    const ggml_tensor *ids = dst->src[2];
    GGML_TENSOR_BINARY_OP_LOCALS

    const queue_ptr stream = ctx.stream();

    const int64_t n_as  = ne02;
    const int64_t n_ids = ids->ne[0];

    std::vector<char> ids_host(ggml_nbytes(ids));
    const char * ids_dev = (const char *) ids->data;

    SYCL_CHECK(CHECK_TRY_ERROR(
        stream->memcpy(ids_host.data(), ids_dev, ggml_nbytes(ids))));
    SYCL_CHECK(CHECK_TRY_ERROR(stream->wait()));

    const ggml_tensor_extra_gpu *src0_extra =
        (const ggml_tensor_extra_gpu *)src0->extra;
    const ggml_tensor_extra_gpu *src1_extra =
        (const ggml_tensor_extra_gpu *)src1->extra;
    const ggml_tensor_extra_gpu *dst_extra =
        (const ggml_tensor_extra_gpu *)dst->extra;

    ggml_tensor_extra_gpu src0_row_extra;
    ggml_tensor_extra_gpu src1_row_extra;
    ggml_tensor_extra_gpu dst_row_extra;

    ggml_tensor src0_row = *src0;
    ggml_tensor src1_row = *src1;
    ggml_tensor dst_row  = *dst;

    src1_row.backend = GGML_BACKEND_TYPE_GPU;
    dst_row.backend  = GGML_BACKEND_TYPE_GPU;

    src0_row.extra = &src0_row_extra;
    src1_row.extra = &src1_row_extra;
    dst_row.extra  = &dst_row_extra;

    char *src0_original = src0->backend == GGML_BACKEND_TYPE_CPU
                              ? (char *)src0->data
                              : (char *)src0_extra->data_device[ctx.device];
    char *src1_original = src1->backend == GGML_BACKEND_TYPE_CPU
                              ? (char *)src1->data
                              : (char *)src1_extra->data_device[ctx.device];
    char *dst_original = dst->backend == GGML_BACKEND_TYPE_CPU
                             ? (char *)dst->data
                             : (char *)dst_extra->data_device[ctx.device];

    src0_row.ne[2] = 1;
    src0_row.ne[3] = 1;
    src0_row.nb[3] = nb02;

    src1_row.ne[1] = 1;
    src1_row.ne[2] = 1;
    src1_row.ne[3] = 1;
    src1_row.nb[2] = nb11;
    src1_row.nb[3] = nb11;

    dst_row.ne[1] = 1;
    dst_row.ne[2] = 1;
    dst_row.ne[3] = 1;
    dst_row.nb[2] = nb1;
    dst_row.nb[3] = nb1;

    if (ne12 == 1) {
        for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) {
            for (int64_t id = 0; id < n_ids; id++) {
                const int32_t i02 = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);

                GGML_ASSERT(i02 >= 0 && i02 < n_as);

                const int64_t i11 = id % ne11;
                const int64_t i12 = iid1;

                const int64_t i1 = id;
                const int64_t i2 = i12;

                src0_row_extra.data_device[ctx.device] =
                    src0_original + i02*nb02;
                src1_row_extra.data_device[ctx.device] =
                    src1_original + i11*nb11 + i12*nb12;
                dst_row_extra.data_device[ctx.device] =
                    dst_original + i1*nb1 + i2*nb2;

                ggml_sycl_mul_mat(ctx, &src0_row, &src1_row, &dst_row);
            }
        }
    } else {
        ggml_sycl_pool_alloc<char> src1_contiguous(ctx.pool(), sizeof(float)*ggml_nelements(src1));
        ggml_sycl_pool_alloc<char>  dst_contiguous(ctx.pool(), sizeof(float)*ggml_nelements(dst));

        src1_row_extra.data_device[ctx.device] = src1_contiguous.get();
        dst_row_extra.data_device[ctx.device]  =  dst_contiguous.get();

        for (int64_t i02 = 0; i02 < n_as; i02++) {
            int64_t num_src1_rows = 0;
            for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) {
                for (int64_t id = 0; id < n_ids; id++) {
                    const int32_t row_id_i = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);

                    GGML_ASSERT(row_id_i >= 0 && row_id_i < n_as);

                    if (row_id_i != i02) {
                        continue;
                    }

                    num_src1_rows++;
                }
            }

            if (num_src1_rows == 0) {
                continue;
            }

            ggml_sycl_pool_alloc<int> dev_cur_src1_row(ctx.pool(), 1);
            ggml_sycl_pool_alloc<mmid_row_mapping> dev_row_mapping(ctx.pool(), num_src1_rows);
            SYCL_CHECK(CHECK_TRY_ERROR(
                stream->memset(dev_cur_src1_row.get(), 0, sizeof(int))));

            {
                sycl::range<3> block_dims(1, 1, std::min((unsigned int)ne10, 768u));
                sycl::range<3> grid_dims(1, n_ids, ids->ne[1]);
                stream->submit([&](sycl::handler &cgh) {
                    sycl::local_accessor<int, 0> src1_row_acc(cgh);

                    char *__restrict src1_contiguous_get =
                        src1_contiguous.get();
                    int *__restrict dev_cur_src1_row_get =
                        dev_cur_src1_row.get();
                    mmid_row_mapping *__restrict dev_row_mapping_get =
                        dev_row_mapping.get();
                    size_t ids_nb_ct6 = ids->nb[1];
                    size_t ids_nb_ct7 = ids->nb[0];

                    cgh.parallel_for(
                        sycl::nd_range<3>(grid_dims * block_dims, block_dims),
                        [=](sycl::nd_item<3> item_ct1) {
                            k_copy_src1_to_contiguous(
                                src1_original, src1_contiguous_get,
                                dev_cur_src1_row_get,
                                dev_row_mapping_get, ids_dev, i02,
                                ids_nb_ct6, ids_nb_ct7, ne11, ne10, nb11, nb12,
                                item_ct1, src1_row_acc);
                        });
                });
            }

            src0_row_extra.data_device[ctx.device] = src0_original + i02*nb02;

            GGML_ASSERT(nb11 == sizeof(float)*ne10);
            GGML_ASSERT(nb1  == sizeof(float)*ne0);

            src1_row.ne[1] = num_src1_rows;
            src1_row.nb[1] = nb11;
            src1_row.nb[2] = num_src1_rows*nb11;
            src1_row.nb[3] = num_src1_rows*nb11;

            dst_row.ne[1] = num_src1_rows;
            dst_row.nb[1] = nb1;
            dst_row.nb[2] = num_src1_rows*nb1;
            dst_row.nb[3] = num_src1_rows*nb1;

            ggml_sycl_mul_mat(ctx, &src0_row, &src1_row, &dst_row);

            {
                sycl::range<3> block_dims(1, 1, std::min((unsigned int)ne0, 768u));
                sycl::range<3> grid_dims(1, 1, num_src1_rows);
                stream->submit([&](sycl::handler &cgh) {
                    const char *__restrict dst_contiguous_get =
                        dst_contiguous.get();
                    const mmid_row_mapping *__restrict dev_row_mapping_get =
                        dev_row_mapping.get();

                    cgh.parallel_for(
                        sycl::nd_range<3>(grid_dims * block_dims, block_dims),
                        [=](sycl::nd_item<3> item_ct1) {
                            k_copy_dst_from_contiguous(dst_original,
                                                       dst_contiguous_get,
                                                       dev_row_mapping_get,
                                                       ne0, nb1, nb2, item_ct1);
                        });
                });
            }
        }
    }
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
static void ggml_sycl_scale(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_scale);
}

static void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_clamp);
}
static void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                          ggml_tensor *dst) try {
    const int64_t ne = ggml_nelements(src0);
    GGML_ASSERT(ne == ggml_nelements(src1));

    GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX);
    GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX);

    GGML_TENSOR_BINARY_OP_LOCALS;

    SYCL_CHECK(ggml_sycl_set_device(ctx.device));
    queue_ptr main_stream = ctx.stream();

    char * src0_ddc = (char *) src0->data;
    char * src1_ddc = (char *) src1->data;

    if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
        ggml_cpy_f32_f32_sycl (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
        ggml_cpy_f32_f16_sycl (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) {
        ggml_cpy_f32_q8_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) {
        ggml_cpy_f32_q4_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) {
        ggml_cpy_f32_q4_1_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
        ggml_cpy_f16_f32_sycl (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
        ggml_cpy_f16_f16_sycl (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_I16 && src1->type == GGML_TYPE_I16) {
        ggml_cpy_i16_i16_sycl (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) {
        ggml_cpy_i32_i32_sycl (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else {
        fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
                ggml_type_name(src0->type), ggml_type_name(src1->type));
        GGML_ASSERT(false);
    }

    (void) dst;
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
static void ggml_sycl_dup(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    // TODO: why do we pass dst as src1 here?
    ggml_sycl_cpy(ctx, src0, dst, nullptr);
    (void) src1;
}
static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_diag_mask_inf);
}

static void ggml_sycl_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_soft_max);
}

static void ggml_sycl_rope(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_rope);
}

static void ggml_sycl_pool2d(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_pool2d);
}

static void ggml_sycl_im2col(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_im2col);
}

static void ggml_sycl_sum_rows(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sum_rows);
}

static void ggml_sycl_argsort(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_argsort);
}

static void ggml_sycl_nop(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    (void) src0;
    (void) src1;
    (void) dst;
}
static size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]);
}
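
// Worked example (illustrative): for a Q4_0 tensor with ne[0] = 4096,
// ggml_row_size gives 4096 / QK4_0 * sizeof(block_q4_0)
// = 128 * 18 = 2304 bytes per row, so nrows_split = 1024 yields
// 1024 * 2304 = 2359296 bytes for the split.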
void ggml_sycl_set_main_device(const int main_device) try {
    if (dpct::get_current_device_id() == main_device) {
        return;
    }
    check_allow_gpu_index(main_device);
    dpct::select_device(main_device);

    if (g_ggml_sycl_debug) {
        dpct::device_info prop;
        SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
            prop, dpct::dev_mgr::instance().get_device(main_device))));
        fprintf(stderr, "Using device %d (%s) as main device\n",
                main_device, prop.get_name());
    }
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tensor * tensor) {
    if (!g_sycl_loaded) {
        return false;
    }

    ggml_sycl_func_t func;

    switch (tensor->op) {
        case GGML_OP_REPEAT:
            func = ggml_sycl_repeat;
            break;
        case GGML_OP_GET_ROWS:
            func = ggml_sycl_get_rows;
            break;
        case GGML_OP_DUP:
            func = ggml_sycl_dup;
            break;
        case GGML_OP_ADD:
            func = ggml_sycl_add;
            break;
        case GGML_OP_ACC:
            func = ggml_sycl_acc;
            break;
        case GGML_OP_MUL:
            func = ggml_sycl_mul;
            break;
        case GGML_OP_DIV:
            func = ggml_sycl_div;
            break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(tensor)) {
                case GGML_UNARY_OP_GELU:
                    func = ggml_sycl_gelu;
                    break;
                case GGML_UNARY_OP_SILU:
                    func = ggml_sycl_silu;
                    break;
                case GGML_UNARY_OP_GELU_QUICK:
                    func = ggml_sycl_gelu_quick;
                    break;
                case GGML_UNARY_OP_TANH:
                    func = ggml_sycl_tanh;
                    break;
                case GGML_UNARY_OP_RELU:
                    func = ggml_sycl_relu;
                    break;
                case GGML_UNARY_OP_HARDSIGMOID:
                    func = ggml_sycl_hardsigmoid;
                    break;
                case GGML_UNARY_OP_HARDSWISH:
                    func = ggml_sycl_hardswish;
                    break;
                default:
                    return false;
            }
            break;
        case GGML_OP_NORM:
            func = ggml_sycl_norm;
            break;
        case GGML_OP_GROUP_NORM:
            func = ggml_sycl_group_norm;
            break;
        case GGML_OP_CONCAT:
            func = ggml_sycl_concat;
            break;
        case GGML_OP_UPSCALE:
            func = ggml_sycl_upscale;
            break;
        case GGML_OP_PAD:
            func = ggml_sycl_pad;
            break;
        case GGML_OP_LEAKY_RELU:
            func = ggml_sycl_leaky_relu;
            break;
        case GGML_OP_RMS_NORM:
            func = ggml_sycl_rms_norm;
            break;
        case GGML_OP_MUL_MAT:
            if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) {
                return false;
            }
            func = ggml_sycl_mul_mat;
            break;
        case GGML_OP_MUL_MAT_ID:
            if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) {
                return false;
            }
            func = ggml_sycl_mul_mat_id;
            break;
        case GGML_OP_SCALE:
            func = ggml_sycl_scale;
            break;
        case GGML_OP_SQR:
            func = ggml_sycl_sqr;
            break;
        case GGML_OP_CLAMP:
            func = ggml_sycl_clamp;
            break;
        case GGML_OP_CPY:
            func = ggml_sycl_cpy;
            break;
        case GGML_OP_CONT:
            func = ggml_sycl_dup;
            break;
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
            func = ggml_sycl_nop;
            break;
        case GGML_OP_DIAG_MASK_INF:
            func = ggml_sycl_diag_mask_inf;
            break;
        case GGML_OP_SOFT_MAX:
            func = ggml_sycl_soft_max;
            break;
        case GGML_OP_ROPE:
            func = ggml_sycl_rope;
            break;
        case GGML_OP_IM2COL:
            func = ggml_sycl_im2col;
            break;
        case GGML_OP_POOL_2D:
            func = ggml_sycl_pool2d;
            break;
        case GGML_OP_SUM_ROWS:
            func = ggml_sycl_sum_rows;
            break;
        case GGML_OP_ARGSORT:
            func = ggml_sycl_argsort;
            break;
        default:
            return false;
    }

    if (tensor->src[0] != nullptr && ggml_backend_buffer_is_sycl_split(tensor->src[0]->buffer)) {
        ggml_sycl_set_peer_access(tensor->src[1]->ne[1], ctx.device);
    }

    func(ctx, tensor->src[0], tensor->src[1], tensor);
    return true;
}
GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len) try {
    GGML_SYCL_DEBUG("[SYCL] call ggml_sycl_get_gpu_list\n");
    for (int i = 0; i < max_len; i++) {
        id_list[i] = -1;
    }

    for (int i = 0; i < ggml_sycl_info().device_count; i++) {
        if (i >= max_len) {
            break;
        }
        id_list[i] = i;
    }
    return;
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
int ggml_sycl_get_device_count() try {
    int device_count;
    if (CHECK_TRY_ERROR(device_count =
                            dpct::dev_mgr::instance().device_count()) != 0) {
        return 0;
    }
    return device_count;
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description,
                                                         size_t description_size) try {
    GGML_SYCL_DEBUG("[SYCL] call ggml_sycl_get_device_description\n");
    dpct::device_info prop;
    SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
        prop, dpct::dev_mgr::instance().get_device(device))));
    snprintf(description, description_size, "%s", prop.get_name());
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free,
                                                   size_t *total) try {
    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_device_memory\n");
    ggml_sycl_set_device(device);

    /*
    DPCT1009:218: SYCL uses exceptions to report errors and does not use the
    error codes. The original code was commented out and a warning string was
    inserted. You need to rewrite this code.
    */
    /*
    DPCT1106:217: 'cudaMemGetInfo' was migrated with the Intel extensions for
    device information which may not be supported by all compilers or runtimes.
    You may need to adjust the code.
    */
    SYCL_CHECK(CHECK_TRY_ERROR(
        dpct::dev_mgr::instance().get_device(device).get_memory_info(*free, *total)));
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
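// Usage sketch for the memory query above (device index 0 is an assumption
// for the example):
//
//   size_t free_mem = 0, total_mem = 0;
//   ggml_backend_sycl_get_device_memory(0, &free_mem, &total_mem);
//   printf("device 0: %zu of %zu bytes free\n", free_mem, total_mem);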
////////////////////////////////////////////////////////////////////////////////

// backend interface

#define UNUSED GGML_UNUSED

// sycl buffer

struct ggml_backend_sycl_buffer_context {
    int device;
    void * dev_ptr = nullptr;
    queue_ptr stream;
    std::string name;

    ggml_backend_sycl_buffer_context(int device, void * dev_ptr, queue_ptr stream) :
        device(device), dev_ptr(dev_ptr), stream(stream) {
        check_allow_gpu_index(device);
        name = (GGML_SYCL_NAME + std::to_string(device));
    }

    ~ggml_backend_sycl_buffer_context() {
        if (dev_ptr != nullptr) {
            ggml_sycl_set_device(device);
            SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(dev_ptr, *stream)));
        }
    }
};
GGML_CALL static const char * ggml_backend_sycl_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;
    return ctx->name.c_str();
}

GGML_CALL static bool ggml_backend_buffer_is_sycl(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_sycl_buffer_get_name;
}

static void
ggml_backend_sycl_buffer_free_buffer(ggml_backend_buffer_t buffer) try {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;
    ggml_sycl_set_device(ctx->device);

    delete ctx;
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void * ggml_backend_sycl_buffer_get_base(ggml_backend_buffer_t buffer) {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;
    return ctx->dev_ptr;
}

GGML_CALL static void
ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer,
                                     ggml_tensor *tensor) try {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;

    if (tensor->view_src != NULL && tensor->view_offs == 0) {
        assert(tensor->view_src->buffer->buft == buffer->buft);
        tensor->backend = tensor->view_src->backend;
        tensor->extra = tensor->view_src->extra;
        return;
    }

    if (ggml_is_quantized(tensor->type)) {
        // initialize padding to 0 to avoid possible NaN values
        size_t original_size = ggml_nbytes(tensor);
        size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor);

        if (padded_size > original_size && tensor->view_src == nullptr) {
            SYCL_CHECK(CHECK_TRY_ERROR(ctx->stream->memset(
                (char *)tensor->data + original_size, 0,
                padded_size - original_size).wait()));
        }
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void ggml_backend_sycl_buffer_set_tensor(ggml_backend_buffer_t buffer,
                                                ggml_tensor *tensor,
                                                const void *data, size_t offset,
                                                size_t size) try {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;

    ggml_sycl_set_device(ctx->device);
    auto stream = &(dpct::dev_mgr::instance().get_device(ctx->device).default_queue());
    SYCL_CHECK(
        CHECK_TRY_ERROR(dpct::dev_mgr::instance().get_device(ctx->device).queues_wait_and_throw()));
    // stage the data through a host buffer before the device copy
    char * host_buf = (char *)malloc(size);
    memcpy(host_buf, data, size);
    SYCL_CHECK(
        CHECK_TRY_ERROR((*stream).memcpy((char *)tensor->data + offset, host_buf, size)
                            .wait()));
    free(host_buf);
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static void ggml_backend_sycl_buffer_get_tensor(ggml_backend_buffer_t buffer,
                                                const ggml_tensor *tensor,
                                                void *data, size_t offset,
                                                size_t size) try {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;

    ggml_sycl_set_device(ctx->device);
    auto stream = dpct::dev_mgr::instance().get_device(ctx->device).default_queue();

    SYCL_CHECK(CHECK_TRY_ERROR(
        stream.memcpy(data, (const char *)tensor->data + offset, size)
            .wait()));
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
GGML_CALL static bool
ggml_backend_sycl_buffer_cpy_tensor(ggml_backend_buffer_t buffer,
                                    const ggml_tensor *src,
                                    ggml_tensor *dst) try {
    if (ggml_backend_buffer_is_sycl(src->buffer)) {
        ggml_backend_sycl_buffer_context * src_ctx = (ggml_backend_sycl_buffer_context *)src->buffer->context;
        ggml_backend_sycl_buffer_context * dst_ctx = (ggml_backend_sycl_buffer_context *)dst->buffer->context;

        ggml_sycl_set_device(src_ctx->device);
        /*
        DPCT1009:198: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        SYCL_CHECK(CHECK_TRY_ERROR(
            dpct::dev_mgr::instance().get_device(src_ctx->device).queues_wait_and_throw()));
        ggml_sycl_set_device(dst_ctx->device);
        /*
        DPCT1009:199: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        SYCL_CHECK(CHECK_TRY_ERROR(
            dpct::dev_mgr::instance().get_device(dst_ctx->device).queues_wait_and_throw()));
        /*
        DPCT1009:200: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */

        queue_ptr stream_dst = dst_ctx->stream;
        queue_ptr stream_src = src_ctx->stream;
        size_t size = ggml_nbytes(src);

        // TODO: this staged copy is a workaround for a known issue with
        // device-to-device copies across GPUs
        dev2dev_memcpy(*stream_dst, *stream_src, dst->data, src->data, size);

        // TODO: direct device-to-device copies across GPUs currently fail;
        // re-enable the path below once the issue is fixed. Do not remove.
#if 0
        SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy(
            (char *)dst->data, (const char *)src->data, size).wait()));

        /*
        DPCT1009:201: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        SYCL_CHECK(CHECK_TRY_ERROR(
            dpct::dev_mgr::instance().get_device(dst_ctx->device).queues_wait_and_throw()));
#endif
        return true;
    }
    return false;
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
static void ggml_backend_sycl_buffer_clear(ggml_backend_buffer_t buffer,
                                           uint8_t value) try {
    ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;

    ggml_sycl_set_device(ctx->device);
    queue_ptr stream = ctx->stream;
    SYCL_CHECK(
        CHECK_TRY_ERROR(dpct::get_current_device().queues_wait_and_throw()));

    SYCL_CHECK(CHECK_TRY_ERROR((*stream)
                                   .memset(ctx->dev_ptr, value, buffer->size)
                                   .wait()));
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static struct ggml_backend_buffer_i ggml_backend_sycl_buffer_interface = {
    /* .get_name        = */ ggml_backend_sycl_buffer_get_name,
    /* .free_buffer     = */ ggml_backend_sycl_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_sycl_buffer_get_base,
    /* .init_tensor     = */ ggml_backend_sycl_buffer_init_tensor,
    /* .set_tensor      = */ ggml_backend_sycl_buffer_set_tensor,
    /* .get_tensor      = */ ggml_backend_sycl_buffer_get_tensor,
    /* .cpy_tensor      = */ ggml_backend_sycl_buffer_cpy_tensor,
    /* .clear           = */ ggml_backend_sycl_buffer_clear,
    /* .reset           = */ NULL,
};
// sycl buffer type

struct ggml_backend_sycl_buffer_type_context {
    int device;
    std::string name;

    // each buffer type has its own stream
    queue_ptr stream = nullptr;
};

GGML_CALL static const char * ggml_backend_sycl_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_sycl_buffer_type_context * ctx = (ggml_backend_sycl_buffer_type_context *)buft->context;

    return ctx->name.c_str();
}

GGML_CALL static ggml_backend_buffer_t
ggml_backend_sycl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                           size_t size) try {
    ggml_backend_sycl_buffer_type_context * buft_ctx = (ggml_backend_sycl_buffer_type_context *)buft->context;
    ggml_sycl_set_device(buft_ctx->device);
    const queue_ptr stream = buft_ctx->stream;
    size = std::max(size, (size_t)1); // avoid zero-byte allocations, which may return nullptr

    void * dev_ptr;
    SYCL_CHECK(CHECK_TRY_ERROR(dev_ptr = (void *)sycl::malloc_device(
        size, *stream)));
    ggml_backend_sycl_buffer_context * ctx = new ggml_backend_sycl_buffer_context(buft_ctx->device, dev_ptr, buft_ctx->stream);
    return ggml_backend_buffer_init(buft, ggml_backend_sycl_buffer_interface, ctx, size);
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
GGML_CALL static size_t ggml_backend_sycl_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 128;

    UNUSED(buft);
}

static size_t ggml_backend_sycl_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    return dpct::get_current_device().get_max_mem_alloc_size();

    UNUSED(buft);
}

GGML_CALL static size_t ggml_backend_sycl_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    size_t size = ggml_nbytes(tensor);
    int64_t ne0 = tensor->ne[0];

    if (ggml_is_quantized(tensor->type)) {
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }
    }

    return size;

    UNUSED(buft);
}
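// Worked example of the padding rule above (the concrete sizes are
// assumptions for illustration): a GGML_TYPE_Q8_0 tensor with ne0 = 480 has
// ne0 % MATRIX_ROW_PADDING == 480, so the allocation grows by
// ggml_row_size(GGML_TYPE_Q8_0, 512 - 480) == 34 bytes (one extra Q8_0
// block), rounding the last row up to a multiple of 512 elements.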
static ggml_backend_buffer_type_i ggml_backend_sycl_buffer_type_interface = {
    /* .get_name         = */ ggml_backend_sycl_buffer_type_name,
    /* .alloc_buffer     = */ ggml_backend_sycl_buffer_type_alloc_buffer,
    /* .get_alignment    = */ ggml_backend_sycl_buffer_type_get_alignment,
    /* .get_max_size     = */ ggml_backend_sycl_buffer_type_get_max_size,
    /* .get_alloc_size   = */ ggml_backend_sycl_buffer_type_get_alloc_size,
    /* .is_host          = */ nullptr,
};

ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);

    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_buffer_type\n");

    if (device >= ggml_sycl_info().device_count || device < 0) {
        printf("ggml_backend_sycl_buffer_type error: device_index %d is out of range [0, %d]; did you forget to call ggml_backend_sycl_set_single_device()?\n",
               device, ggml_sycl_info().device_count - 1);
        GGML_ASSERT(device < ggml_sycl_info().device_count);
    }

    static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_types[GGML_SYCL_MAX_DEVICES];

    static bool ggml_backend_sycl_buffer_type_initialized = false;

    if (!ggml_backend_sycl_buffer_type_initialized) {
        for (int i = 0; i < ggml_sycl_info().device_count; i++) {
            auto & device_i = dpct::dev_mgr::instance().get_device(i);
            queue_ptr stream = &(device_i.default_queue());
            ggml_backend_sycl_buffer_types[i] = {
                /* .iface   = */ ggml_backend_sycl_buffer_type_interface,
                /* .context = */ new ggml_backend_sycl_buffer_type_context{i, GGML_SYCL_NAME + std::to_string(i), stream},
            };
        }
        ggml_backend_sycl_buffer_type_initialized = true;
    }

    return &ggml_backend_sycl_buffer_types[device];
}
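// Usage sketch (device index and buffer size are assumptions for the
// example):
//
//   ggml_backend_buffer_type_t buft = ggml_backend_sycl_buffer_type(0);
//   ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 1024 * 1024);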
ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(ggml_backend_sycl_context * ctx) {
    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_buffer_type\n");

    int device = ctx->device;
    if (device >= ggml_sycl_info().device_count || device < 0) {
        printf("ggml_backend_sycl_buffer_type error: device_index %d is out of range [0, %d]; did you forget to call ggml_backend_sycl_set_single_device()?\n",
               device, ggml_sycl_info().device_count - 1);
        GGML_ASSERT(device < ggml_sycl_info().device_count);
    }

    static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_types[GGML_SYCL_MAX_DEVICES];

    static bool ggml_backend_sycl_buffer_type_initialized = false;

    if (!ggml_backend_sycl_buffer_type_initialized) {
        for (int i = 0; i < ggml_sycl_info().device_count; i++) {
            ggml_backend_sycl_buffer_types[i] = {
                /* .iface   = */ ggml_backend_sycl_buffer_type_interface,
                /* .context = */ new ggml_backend_sycl_buffer_type_context{i, GGML_SYCL_NAME + std::to_string(i), ctx->stream(i, 0)},
            };
        }
        ggml_backend_sycl_buffer_type_initialized = true;
    }

    return &ggml_backend_sycl_buffer_types[device];
}
// sycl split buffer type

static void get_row_split(int64_t * row_low, int64_t * row_high, const ggml_tensor * tensor, const std::array<float, GGML_SYCL_MAX_DEVICES> & tensor_split, int id) {
    const int64_t nrows = ggml_nrows(tensor);
    const int64_t rounding = get_row_rounding(tensor->type, tensor_split);

    *row_low = id == 0 ? 0 : nrows*tensor_split[id];
    *row_low -= *row_low % rounding;

    if (id == ggml_sycl_info().device_count - 1) {
        *row_high = nrows;
    } else {
        *row_high = nrows*tensor_split[id + 1];
        *row_high -= *row_high % rounding;
    }
}
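// Worked example of get_row_split (all numbers are assumptions for
// illustration): with nrows = 1000, rounding = 32, two devices and
// cumulative tensor_split = {0.0f, 0.5f}, device 0 gets rows [0, 480)
// (500 rounded down to a multiple of 32) and the last device gets rows
// [480, 1000).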
struct ggml_backend_sycl_split_buffer_context {
    ~ggml_backend_sycl_split_buffer_context() try {
        for (ggml_tensor_extra_gpu * extra : tensor_extras) {
            for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
                for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) {
                    if (extra->events[i][is] != nullptr) {
                        /*
                        DPCT1009:206: SYCL uses exceptions to report errors and
                        does not use the error codes. The original code was
                        commented out and a warning string was inserted. You
                        need to rewrite this code.
                        */
                        SYCL_CHECK(CHECK_TRY_ERROR(
                            dpct::destroy_event(extra->events[i][is])));
                    }
                }
                if (extra->data_device[i] != nullptr) {
                    /*
                    DPCT1009:207: SYCL uses exceptions to report errors and does
                    not use the error codes. The original code was commented out
                    and a warning string was inserted. You need to rewrite this
                    code.
                    */
                    ggml_sycl_set_device(i);
                    SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(
                        extra->data_device[i], *(streams[i]))));
                }
            }
            delete extra;
        }
    }
    catch (sycl::exception const &exc) {
        std::cerr << exc.what() << "Exception caught at file:" << __FILE__
                  << ", line:" << __LINE__ << std::endl;
        std::exit(1);
    }

    std::vector<ggml_tensor_extra_gpu *> tensor_extras;
    std::vector<queue_ptr> streams;
};
GGML_CALL static const char * ggml_backend_sycl_split_buffer_get_name(ggml_backend_buffer_t buffer) {
    return GGML_SYCL_NAME "_Split";

    UNUSED(buffer);
}

static bool ggml_backend_buffer_is_sycl_split(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_sycl_split_buffer_get_name;
}

GGML_CALL static void ggml_backend_sycl_split_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
    delete ctx;
}

GGML_CALL static void * ggml_backend_sycl_split_buffer_get_base(ggml_backend_buffer_t buffer) {
    // the pointers are stored in the tensor extras, this is just a dummy address and never dereferenced
    return (void *)0x1000;

    UNUSED(buffer);
}
GGML_CALL static void
ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
                                           ggml_tensor *tensor) try {
    GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported

    ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
    ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context;

    const int64_t ne0 = tensor->ne[0];

    ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{};
    ctx->tensor_extras.push_back(extra);
    ctx->streams.push_back(&(dpct::get_current_device().default_queue()));

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        int64_t row_low, row_high;
        get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i);

        int64_t nrows_split = row_high - row_low;
        if (nrows_split == 0) {
            continue;
        }

        size_t size = ggml_nbytes_split(tensor, nrows_split);
        const size_t original_size = size;

        // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }

        // FIXME: do not crash if the device allocation fails
        // currently, init_tensor cannot fail; this needs to be fixed in ggml-backend first
        ggml_sycl_set_device(i);
        const queue_ptr stream = ctx->streams[i];
        char * buf;
        /*
        DPCT1009:208: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        SYCL_CHECK(CHECK_TRY_ERROR(buf = (char *)sycl::malloc_device(
            size, *stream)));

        // set padding to 0 to avoid possible NaN values
        if (size > original_size) {
            /*
            DPCT1009:209: SYCL uses exceptions to report errors and does not use
            the error codes. The original code was commented out and a warning
            string was inserted. You need to rewrite this code.
            */
            SYCL_CHECK(CHECK_TRY_ERROR(
                (*stream)
                    .memset(buf + original_size, 0, size - original_size)
                    .wait()));
        }

        extra->data_device[i] = buf;

        for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) {
            /*
            DPCT1009:210: SYCL uses exceptions to report errors and does not use
            the error codes. The original code was commented out and a warning
            string was inserted. You need to rewrite this code.
            */
            SYCL_CHECK(
                CHECK_TRY_ERROR(extra->events[i][is] = new sycl::event()));
        }
    }
    tensor->backend = GGML_BACKEND_TYPE_GPU_SPLIT;
    tensor->extra = extra;
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
GGML_CALL static void
ggml_backend_sycl_split_buffer_set_tensor(ggml_backend_buffer_t buffer,
                                          ggml_tensor *tensor, const void *data,
                                          size_t offset, size_t size) try {
    // split tensors must always be set in their entirety at once
    GGML_ASSERT(offset == 0);
    GGML_ASSERT(size == ggml_nbytes(tensor));

    ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
    ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context;

    const int64_t ne0 = tensor->ne[0];
    const size_t nb1 = tensor->nb[1];
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra;

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        int64_t row_low, row_high;
        get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i);

        int64_t nrows_split = row_high - row_low;
        if (nrows_split == 0) {
            continue;
        }

        const size_t offset_split = row_low*nb1;
        size_t size = ggml_nbytes_split(tensor, nrows_split);
        const size_t original_size = size;

        // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }

        const char * buf_host = (const char *)data + offset_split;
        /*
        DPCT1009:211: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        ggml_sycl_set_device(i);
        const queue_ptr stream = ctx->streams[i];
        SYCL_CHECK(CHECK_TRY_ERROR(
            (*stream)
                .memcpy(extra->data_device[i], buf_host, original_size)
                .wait()));
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
GGML_CALL static void
ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer,
                                          const ggml_tensor *tensor, void *data,
                                          size_t offset, size_t size) try {
    // split tensors must always be read in their entirety at once
    GGML_ASSERT(offset == 0);
    GGML_ASSERT(size == ggml_nbytes(tensor));

    ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
    ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context;

    const int64_t ne0 = tensor->ne[0];
    const size_t nb1 = tensor->nb[1];
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra;

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        int64_t row_low, row_high;
        get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i);

        int64_t nrows_split = row_high - row_low;
        if (nrows_split == 0) {
            continue;
        }

        const size_t offset_split = row_low*nb1;
        size_t size = ggml_nbytes_split(tensor, nrows_split);
        const size_t original_size = size;

        // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }

        char * buf_host = (char *)data + offset_split;
        /*
        DPCT1009:212: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        ggml_sycl_set_device(i);
        const queue_ptr stream = ctx->streams[i];
        SYCL_CHECK(CHECK_TRY_ERROR(
            (*stream)
                .memcpy(buf_host, extra->data_device[i], original_size)
                .wait()));
    }
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
GGML_CALL static void ggml_backend_sycl_split_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    UNUSED(buffer);
    UNUSED(value);
}

static struct ggml_backend_buffer_i ggml_backend_sycl_split_buffer_interface = {
    /* .get_name        = */ ggml_backend_sycl_split_buffer_get_name,
    /* .free_buffer     = */ ggml_backend_sycl_split_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_sycl_split_buffer_get_base,
    /* .init_tensor     = */ ggml_backend_sycl_split_buffer_init_tensor,
    /* .set_tensor      = */ ggml_backend_sycl_split_buffer_set_tensor,
    /* .get_tensor      = */ ggml_backend_sycl_split_buffer_get_tensor,
    /* .cpy_tensor      = */ NULL,
    /* .clear           = */ ggml_backend_sycl_split_buffer_clear,
    /* .reset           = */ NULL,
};

GGML_CALL static const char * ggml_backend_sycl_split_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_SYCL_NAME "_Split";

    UNUSED(buft);
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_sycl_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    // since we don't know the exact split after rounding, we cannot allocate the device buffers at this point
    // instead, we allocate them for each tensor separately in init_tensor
    // however, the size still represents the maximum cumulative size of all the device buffers after the tensors are allocated,
    // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct.
    ggml_backend_sycl_split_buffer_context * ctx = new ggml_backend_sycl_split_buffer_context();

    return ggml_backend_buffer_init(buft, ggml_backend_sycl_split_buffer_interface, ctx, size);
}

GGML_CALL static size_t ggml_backend_sycl_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 128;

    UNUSED(buft);
}

GGML_CALL static size_t ggml_backend_sycl_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    ggml_backend_sycl_split_buffer_type_context * ctx = (ggml_backend_sycl_split_buffer_type_context *)buft->context;

    size_t total_size = 0;

    const int64_t ne0 = tensor->ne[0];

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        int64_t row_low, row_high;
        get_row_split(&row_low, &row_high, tensor, ctx->tensor_split, i);

        int64_t nrows_split = row_high - row_low;
        if (nrows_split == 0) {
            continue;
        }

        total_size += ggml_nbytes_split(tensor, nrows_split);

        // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            total_size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }
    }

    return total_size;
}

GGML_CALL static bool ggml_backend_sycl_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return false;

    UNUSED(buft);
}

static ggml_backend_buffer_type_i ggml_backend_sycl_split_buffer_type_interface = {
    /* .get_name         = */ ggml_backend_sycl_split_buffer_type_name,
    /* .alloc_buffer     = */ ggml_backend_sycl_split_buffer_type_alloc_buffer,
    /* .get_alignment    = */ ggml_backend_sycl_split_buffer_type_get_alignment,
    /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
    /* .get_alloc_size   = */ ggml_backend_sycl_split_buffer_type_get_alloc_size,
    /* .is_host          = */ ggml_backend_sycl_split_buffer_type_is_host,
};
GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);

    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_split_buffer_type\n");
    ggml_check_sycl();
    // note: the static map below is only accessed while holding the mutex above
    static std::map<std::array<float, GGML_SYCL_MAX_DEVICES>, struct ggml_backend_buffer_type> buft_map;

    std::array<float, GGML_SYCL_MAX_DEVICES> tensor_split_arr = {};

    bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_SYCL_MAX_DEVICES, [](float x) { return x == 0.0f; });
    if (all_zero) {
        tensor_split_arr = ggml_sycl_info().default_tensor_split;
    } else {
        float split_sum = 0.0f;
        for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
            tensor_split_arr[i] = split_sum;
            split_sum += tensor_split[i];
        }
        for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
            tensor_split_arr[i] /= split_sum;
        }
    }

    auto it = buft_map.find(tensor_split_arr);
    if (it != buft_map.end()) {
        return &it->second;
    }

    struct ggml_backend_buffer_type buft {
        /* .iface   = */ ggml_backend_sycl_split_buffer_type_interface,
        /* .context = */ new ggml_backend_sycl_split_buffer_type_context{tensor_split_arr},
    };

    auto result = buft_map.emplace(tensor_split_arr, buft);
    return &result.first->second;
}
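// Usage sketch (the 60/40 split is an assumption for the example; a null
// pointer or an all-zero array selects the default split):
//
//   float tensor_split[GGML_SYCL_MAX_DEVICES] = {0.6f, 0.4f};
//   ggml_backend_buffer_type_t buft = ggml_backend_sycl_split_buffer_type(tensor_split);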
// host buffer type

GGML_CALL static const char * ggml_backend_sycl_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_SYCL_NAME "_Host";

    UNUSED(buft);
}

GGML_CALL static const char * ggml_backend_sycl_host_buffer_name(ggml_backend_buffer_t buffer) {
    return GGML_SYCL_NAME "_Host";

    UNUSED(buffer);
}

static void ggml_backend_sycl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_sycl_host_free(buffer->context);
}

static ggml_backend_buffer_t ggml_backend_sycl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * ptr = ggml_sycl_host_malloc(size);

    if (ptr == nullptr) {
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    // FIXME: this is a hack to avoid having to implement a new buffer type
    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.get_name = ggml_backend_sycl_host_buffer_name;
    buffer->iface.free_buffer = ggml_backend_sycl_host_buffer_free_buffer;

    return buffer;
}

ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type() {
    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_host_buffer_type\n");
    static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name         = */ ggml_backend_sycl_host_buffer_type_name,
            /* .alloc_buffer     = */ ggml_backend_sycl_host_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
            /* .get_max_size     = */ NULL, // TODO: return device.maxBufferLength
            /* .get_alloc_size   = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host          = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .context  = */ nullptr,
    };

    return &ggml_backend_sycl_buffer_type_host;
}
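// Usage sketch: the host buffer type hands out memory from
// ggml_sycl_host_malloc (typically host-pinned, which tends to speed up
// host/device transfers) and silently falls back to a plain CPU buffer on
// allocation failure. The size below is an assumption for the example:
//
//   ggml_backend_buffer_type_t host_buft = ggml_backend_sycl_host_buffer_type();
//   ggml_backend_buffer_t staging = ggml_backend_buft_alloc_buffer(host_buft, 1 << 20);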
// backend

GGML_CALL static const char * ggml_backend_sycl_name(ggml_backend_t backend) {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;

    return sycl_ctx->name.c_str();
}

GGML_CALL static void ggml_backend_sycl_free(ggml_backend_t backend) {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;

    delete sycl_ctx;
    delete backend;
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_sycl_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    return ggml_backend_sycl_buffer_type(sycl_ctx->device);
}

GGML_CALL static void ggml_backend_sycl_set_tensor_async(ggml_backend_t backend,
                                                         ggml_tensor *tensor,
                                                         const void *data, size_t offset,
                                                         size_t size) try {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && "unsupported buffer type");
    const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
    SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
        (char *)tensor->data + offset, data, size).wait()));
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

GGML_CALL static void ggml_backend_sycl_get_tensor_async(ggml_backend_t backend,
                                                         const ggml_tensor *tensor,
                                                         void *data, size_t offset,
                                                         size_t size) try {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && "unsupported buffer type");
    const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
    SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
        data, (const char *)tensor->data + offset, size).wait()));
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

GGML_CALL static bool ggml_backend_sycl_cpy_tensor_async(ggml_backend_t backend,
                                                         const ggml_tensor *src,
                                                         ggml_tensor *dst) try {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    if (dst->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && ggml_backend_buffer_is_sycl(src->buffer)) {
        /*
        DPCT1009:215: SYCL uses exceptions to report errors and does not use the
        error codes. The original code was commented out and a warning string
        was inserted. You need to rewrite this code.
        */
        const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
        SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
            dst->data, src->data, ggml_nbytes(dst)).wait()));
        return true;
    }

    return false;
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}

static void ggml_backend_sycl_synchronize(ggml_backend_t backend) try {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
    SYCL_CHECK(CHECK_TRY_ERROR((stream)->wait()));

    UNUSED(backend);
}
catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
              << ", line:" << __LINE__ << std::endl;
    std::exit(1);
}
GGML_CALL static ggml_status ggml_backend_sycl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    ggml_sycl_set_main_device(sycl_ctx->device);

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_tensor * node = cgraph->nodes[i];
        if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
            continue;
        }
#ifndef NDEBUG
        assert(node->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device));
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j] != nullptr) {
                assert(node->src[j]->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device));
            }
        }
#endif
        bool ok = ggml_sycl_compute_forward(*sycl_ctx, node);
        if (!ok) {
            fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
        }
        GGML_ASSERT(ok);
    }

    return GGML_STATUS_SUCCESS;
}
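// Usage sketch for driving graph execution through the generic ggml-backend
// API (graph construction is elided; `gf` is an assumed ggml_cgraph whose
// tensors live in SYCL buffers):
//
//   ggml_backend_t backend = ggml_backend_sycl_init(0);
//   ggml_backend_graph_compute(backend, gf);
//   ggml_backend_synchronize(backend);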
GGML_CALL static bool ggml_backend_sycl_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_HARDSIGMOID:
                case GGML_UNARY_OP_HARDSWISH:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_TANH:
                    return ggml_is_contiguous(op->src[0]);
                default:
                    return false;
            }
            break;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            {
                struct ggml_tensor * a;
                struct ggml_tensor * b;
                if (op->op == GGML_OP_MUL_MAT) {
                    a = op->src[0];
                    b = op->src[1];
                } else {
                    a = op->src[2];
                    b = op->src[1];
                }
                if (a->ne[3] != b->ne[3]) {
                    return false;
                }
                ggml_type a_type = a->type;
                if (a_type == GGML_TYPE_IQ4_NL  || a_type == GGML_TYPE_IQ4_XS ||
                    a_type == GGML_TYPE_IQ3_XXS || a_type == GGML_TYPE_IQ3_S  ||
                    a_type == GGML_TYPE_IQ2_XXS || a_type == GGML_TYPE_IQ2_XS || a_type == GGML_TYPE_IQ2_S ||
                    a_type == GGML_TYPE_IQ1_S   || a_type == GGML_TYPE_IQ1_M
                    ) {
                    if (b->ne[1] == 1 && ggml_nrows(b) > 1) {
                        return false;
                    }
                }
                return true;
            } break;
        case GGML_OP_GET_ROWS:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F16:
                    case GGML_TYPE_F32:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                        return true;
                    default:
                        return false;
                }
            } break;
        case GGML_OP_CPY:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_type src1_type = op->src[1]->type;
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q8_0) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_0) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_1) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
                    return true;
                }
                return false;
            } break;
        case GGML_OP_CONCAT:
            {
                ggml_type src0_type = op->src[0]->type;
                return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16;
            } break;
        case GGML_OP_DUP:
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_REPEAT:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_NORM:
        case GGML_OP_ADD:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_CLAMP:
        case GGML_OP_CONT:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
        case GGML_OP_POOL_2D:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_ARGSORT:
        case GGML_OP_ACC:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_UPSCALE:
        case GGML_OP_PAD:
        case GGML_OP_LEAKY_RELU:
            return true;
        default:
            return false;
    }

    UNUSED(backend);
}
GGML_CALL static bool ggml_backend_sycl_offload_op(ggml_backend_t backend, const ggml_tensor * op) {
    const int min_batch_size = 32;
    return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS && op->op != GGML_OP_MUL_MAT_ID;
    GGML_UNUSED(backend);
}
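// Worked example of the offload heuristic above (shapes are assumptions for
// illustration): a MUL_MAT node with ne[1] == 64 satisfies the minimum batch
// size of 32 and is offloaded, while a single-token node with ne[1] == 1 is
// left to the CPU backend.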
GGML_CALL static bool ggml_backend_sycl_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    if (buft->iface.get_name != ggml_backend_sycl_buffer_type_name) {
        return false;
    }
    ggml_backend_sycl_buffer_type_context * buft_ctx = (ggml_backend_sycl_buffer_type_context *)buft->context;
    ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
    return buft_ctx->device == sycl_ctx->device;
}
static ggml_backend_i ggml_backend_sycl_interface = {
    /* .get_name                = */ ggml_backend_sycl_name,
    /* .free                    = */ ggml_backend_sycl_free,
    /* .get_default_buffer_type = */ ggml_backend_sycl_get_default_buffer_type,
    /* .set_tensor_async        = */ ggml_backend_sycl_set_tensor_async,
    /* .get_tensor_async        = */ ggml_backend_sycl_get_tensor_async,
    /* .cpy_tensor_async        = */ NULL, // ggml_backend_sycl_cpy_tensor_async; TODO: update for the new interface
    /* .synchronize             = */ ggml_backend_sycl_synchronize,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_sycl_graph_compute,
    /* .supports_op             = */ ggml_backend_sycl_supports_op,
    /* .supports_buft           = */ ggml_backend_sycl_supports_buft,
    /* .offload_op              = */ ggml_backend_sycl_offload_op,
    /* .event_new               = */ NULL,
    /* .event_free              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
    /* .event_synchronize       = */ NULL,
};

static ggml_guid_t ggml_backend_sycl_guid() {
    static ggml_guid guid = { 0x58, 0x05, 0x13, 0x8f, 0xcd, 0x3a, 0x61, 0x9d, 0xe7, 0xcd, 0x98, 0xa9, 0x03, 0xfd, 0x7c, 0x53 };
    return &guid;
}

GGML_CALL ggml_backend_t ggml_backend_sycl_init(int device) {
    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_init\n");
    ggml_check_sycl();

    check_allow_gpu_index(device);

    ggml_backend_sycl_context * ctx = new ggml_backend_sycl_context(device);
    if (ctx == nullptr) {
        fprintf(stderr, "%s: error: failed to allocate context\n", __func__);
        return nullptr;
    }

    ggml_backend_t sycl_backend = new ggml_backend {
        /* .guid      = */ ggml_backend_sycl_guid(),
        /* .interface = */ ggml_backend_sycl_interface,
        /* .context   = */ ctx
    };

    return sycl_backend;
}
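// Usage sketch (device index 0 is an assumption for the example):
//
//   ggml_backend_t backend = ggml_backend_sycl_init(0);
//   if (backend != nullptr) {
//       // ... build and compute graphs ...
//       ggml_backend_free(backend);
//   }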
bool ggml_backend_is_sycl(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_sycl_guid());
}

GGML_CALL int ggml_backend_sycl_get_device_count() {
    GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_device_count\n");
    return ggml_sycl_info().device_count;
}

GGML_CALL static ggml_backend_t ggml_backend_reg_sycl_init(const char * params, void * user_data) {
    ggml_backend_t sycl_backend = ggml_backend_sycl_init((int) (intptr_t) user_data);
    return sycl_backend;

    UNUSED(params);
}

extern "C" int ggml_backend_sycl_reg_devices();

int ggml_backend_sycl_reg_devices() {
    assert(ggml_sycl_info().device_count > 0);
    for (int i = 0; i < ggml_sycl_info().device_count; i++) {
        char name[128];
        snprintf(name, sizeof(name), "%s%d", GGML_SYCL_NAME, i);
        ggml_backend_register(name, ggml_backend_reg_sycl_init, ggml_backend_sycl_buffer_type(i), (void *) (intptr_t) i);
    }
    return ggml_sycl_info().device_count;
}