#include "ggml-quants.h"
#include "ggml-impl.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define UNUSED GGML_UNUSED

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
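    // _mm_maddubs_epi16 treats its first operand as unsigned and its second as
    // signed, so we pass |x| first and fold x's sign into y before multiplying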
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}

#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}
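// each byte of bit_mask leaves exactly one bit clear, so after the OR a byte
// equals 0xFF (all ones) only if its selected source bit was set; bit k of the
// 32-bit input thus becomes output byte k (0xFF when set, 0x00 when clear)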
// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}

static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4);  // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);              // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                        // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
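// e.g. two adjacent 16-bit lanes 0x0102 and 0x0304 pack into the bytes 0x12
// and 0x34: the high nibble of each output byte comes from the lane's upper byte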
#elif defined(__AVX__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
    const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
    __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
    __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
    const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytesl = _mm_or_si128(bytesl, bit_mask);
    bytesh = _mm_or_si128(bytesh, bit_mask);
    bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
    bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
    return MM256_SET_M128I(bytesh, bytesl);
}

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    // Load 16 bytes from memory
    __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
    __m128i tmph = _mm_srli_epi16(tmpl, 4);
    const __m128i lowMask = _mm_set1_epi8(0xF);
    tmpl = _mm_and_si128(lowMask, tmpl);
    tmph = _mm_and_si128(lowMask, tmph);
    return MM256_SET_M128I(tmph, tmpl);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
    const __m128i ones = _mm_set1_epi16(1);
    const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
    const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
    const __m128i axl = _mm256_castsi256_si128(ax);
    const __m128i axh = _mm256_extractf128_si256(ax, 1);
    const __m128i syl = _mm256_castsi256_si128(sy);
    const __m128i syh = _mm256_extractf128_si256(sy, 1);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
    const __m128i xl = _mm256_castsi256_si128(x);
    const __m128i xh = _mm256_extractf128_si256(x, 1);
    const __m128i yl = _mm256_castsi256_si128(y);
    const __m128i yh = _mm256_extractf128_si256(y, 1);
    // Get absolute values of x vectors
    const __m128i axl = _mm_sign_epi8(xl, xl);
    const __m128i axh = _mm_sign_epi8(xh, xh);
    // Sign the values of the y vectors
    const __m128i syl = _mm_sign_epi8(yl, xl);
    const __m128i syh = _mm_sign_epi8(yh, xh);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m128i lowByte = _mm_set1_epi16( 0xFF );
    __m128i high = _mm_andnot_si128( lowByte, bytes1 );
    __m128i low = _mm_and_si128( lowByte, bytes1 );
    high = _mm_srli_epi16( high, 4 );
    bytes1 = _mm_or_si128( low, high );
    high = _mm_andnot_si128( lowByte, bytes2 );
    low = _mm_and_si128( lowByte, bytes2 );
    high = _mm_srli_epi16( high, 4 );
    bytes2 = _mm_or_si128( low, high );
    return _mm_packus_epi16( bytes1, bytes2);
}
#endif
#elif defined(__SSSE3__)
// horizontally add 4x4 floats
static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
    __m128 res_0 = _mm_hadd_ps(a, b);
    __m128 res_1 = _mm_hadd_ps(c, d);
    __m128 res   = _mm_hadd_ps(res_0, res_1);
    res          = _mm_hadd_ps(res, res);
    res          = _mm_hadd_ps(res, res);
    return _mm_cvtss_f32(res);
}
#endif // __AVX__ || __AVX2__ || __AVX512F__
#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)

#if defined(__ARM_NEON)

#ifdef _MSC_VER

#define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }

#else

#define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }

#endif

#if !defined(__aarch64__)
// 64-bit compatibility: the following AArch64-only intrinsics are
// reimplemented here for 32-bit ARM targets

// vaddvq_s16
// vpaddq_s16
// vpaddq_s32
// vaddvq_s32
// vaddvq_f32
// vmaxvq_f32
// vcvtnq_s32_f32
// vzip1_u8
// vzip2_u8

inline static int32_t vaddvq_s16(int16x8_t v) {
    return
        (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
        (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
        (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
        (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
}

inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
    int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
    int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
    return vcombine_s16(a0, b0);
}

inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
    int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
    int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
    return vcombine_s32(a0, b0);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}
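// note: the hardware vcvtnq_s32_f32 rounds to nearest with ties to even,
// while roundf() rounds ties away from zero; the results only differ on
// exact .5 ties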
inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[0]; res[1] = b[0];
    res[2] = a[1]; res[3] = b[1];
    res[4] = a[2]; res[5] = b[2];
    res[6] = a[3]; res[7] = b[3];

    return res;
}

inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[4]; res[1] = b[4];
    res[2] = a[5]; res[3] = b[5];
    res[4] = a[6]; res[5] = b[6];
    res[6] = a[7]; res[7] = b[7];

    return res;
}

// vld1q_s16_x2
// vld1q_u8_x2
// vld1q_u8_x4
// vld1q_s8_x2
// vld1q_s8_x4
// TODO: double-check these work correctly

typedef struct ggml_int16x8x2_t {
    int16x8_t val[2];
} ggml_int16x8x2_t;

inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
    ggml_int16x8x2_t res;

    res.val[0] = vld1q_s16(ptr + 0);
    res.val[1] = vld1q_s16(ptr + 8);

    return res;
}

typedef struct ggml_uint8x16x2_t {
    uint8x16_t val[2];
} ggml_uint8x16x2_t;

inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
    ggml_uint8x16x2_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);

    return res;
}

typedef struct ggml_uint8x16x4_t {
    uint8x16_t val[4];
} ggml_uint8x16x4_t;

inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
    ggml_uint8x16x4_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);
    res.val[2] = vld1q_u8(ptr + 32);
    res.val[3] = vld1q_u8(ptr + 48);

    return res;
}

typedef struct ggml_int8x16x2_t {
    int8x16_t val[2];
} ggml_int8x16x2_t;

inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
    ggml_int8x16x2_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);

    return res;
}

typedef struct ggml_int8x16x4_t {
    int8x16_t val[4];
} ggml_int8x16x4_t;

inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
    ggml_int8x16x4_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);
    res.val[2] = vld1q_s8(ptr + 32);
    res.val[3] = vld1q_s8(ptr + 48);

    return res;
}

#else

#define ggml_int16x8x2_t  int16x8x2_t
#define ggml_uint8x16x2_t uint8x16x2_t
#define ggml_uint8x16x4_t uint8x16x4_t
#define ggml_int8x16x2_t  int8x16x2_t
#define ggml_int8x16x4_t  int8x16x4_t

#define ggml_vld1q_s16_x2 vld1q_s16_x2
#define ggml_vld1q_u8_x2  vld1q_u8_x2
#define ggml_vld1q_u8_x4  vld1q_u8_x4
#define ggml_vld1q_s8_x2  vld1q_s8_x2
#define ggml_vld1q_s8_x4  vld1q_s8_x4

#endif

#if !defined(__ARM_FEATURE_DOTPROD)

inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));

    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
}
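// note: unlike hardware vdotq_s32, this fallback groups products 2i, 2i+1,
// 8+2i, 8+2i+1 into lane i rather than four consecutive elements; the sum
// across all four lanes is identical, which is what the dot kernels reduce over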
#else

#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)

#endif

#endif

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8 bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
  401. #endif
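// How B8 works: each macro level B1..B7 appends one more hex byte that is
// either c or s, so B8(c,s) expands to all 256 eight-byte constants in index
// order, with bit j of the table index selecting byte j of the entry.
// For example, table_b2b_0[0x05] = 0x0000000000100010: bits 0 and 2 of the
// index are set, so bytes 0 and 2 are 0x10 and the rest are 0x00.
// table_b2b_1 is the complement (byte j is 0x10 where bit j is clear).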
// reference implementation for deterministic creation of model files
void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
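// Layout recap for q4_0: one fp16 scale d per 32 values, with d chosen as
// (signed extremum)/-8 so the extremum lands on the edge of the code range.
// Each weight is stored as q = MIN(15, (int)(x/d + 8.5f)) and reconstructed
// as (q - 8)*d; the two 16-element halves of a block share a byte, low
// nibble first. Worked example: max = -2.0f gives d = 0.25f, so x = -2.0f
// encodes as q = 0 and decodes exactly; since |x| <= |max|, x/d stays in
// [-8, 8] and q never goes below 0.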
void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_0_reference(x, y, k);
}

void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
    const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}

void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_1_reference(x, y, k);
}
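// q4_1 is the affine variant of q4_0: it stores both a scale
// d = (max - min)/15 and an offset m = min, and reconstructs x ~= q*d + m.
// This spends two extra bytes per 32-value block but wastes no codes when a
// block is entirely positive or entirely negative.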
void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -16;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
            const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(qh));
    }
}

void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_0_reference(x, y, k);
}
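// Bit layout for q5_0: the low 4 bits of each 5-bit code go into the packed
// nibble array qs exactly as in q4_0, while the 32 fifth bits of the block
// are collected into the single uint32_t qh (bit j for element j of the
// first half, bit j + 16 for the second half). Dequantization ORs the fifth
// bit back in before subtracting the offset of 16.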
void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
    const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}

void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_1_reference(x, y, k);
}
// reference implementation for deterministic creation of model files
void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
    assert(k % QK8_0 == 0);

    const int nb = k / QK8_0;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_0; j++) {
            const float v = x[i*QK8_0 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < QK8_0; ++j) {
            const float x0 = x[i*QK8_0 + j]*id;

            y[i].qs[j] = roundf(x0);
        }
    }
}
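// q8_0 is the symmetric 8-bit format: d = amax/127 so that q = round(x/d)
// stays within [-127, 127], and reconstruction is simply q*d with no offset.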
void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        // pairwise max reduction tree: 8 -> 4 -> 2 -> 1 vectors
        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
        }
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
        }
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = GGML_FP32_TO_FP16(d);
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // Since AVX lacks some of the necessary intrinsics,
        // we split the registers in half and call the SSE analogs of the AVX2 ones
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#elif defined(__riscv_v_intrinsic)
    size_t vl = __riscv_vsetvl_e32m4(QK8_0);

    for (int i = 0; i < nb; i++) {
        // load elements
        vfloat32m4_t v_x   = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);

        vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
        vfloat32m1_t tmp   = __riscv_vfmv_v_f_f32m1(0.0f, vl);
        vfloat32m1_t vmax  = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
        float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);

        // convert to integer
        vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
        vint8m1_t  vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);

        // store result
        __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
    }
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_0_reference(x, y, k);
#endif
}
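// Note on rounding across the branches above: NEON (vcvtnq_s32_f32), AVX
// (_MM_ROUND_NEAREST) and the scalar reference (roundf) all round to the
// nearest integer, whereas wasm_i32x4_trunc_sat_f32x4 is a saturating
// truncation toward zero, so the wasm path can differ from the reference by
// one code whenever the scaled value's fractional part is 0.5 or more.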
// reference implementation for deterministic creation of model files
void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
    assert(QK8_1 == 32);
    assert(k % QK8_1 == 0);

    const int nb = k / QK8_1;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_1; j++) {
            const float v = x[i*QK8_1 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        int sum = 0;

        for (int j = 0; j < QK8_1/2; ++j) {
            const float v0 = x[i*QK8_1           + j]*id;
            const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;

            y[i].qs[          j] = roundf(v0);
            y[i].qs[QK8_1/2 + j] = roundf(v1);

            sum += y[i].qs[          j];
            sum += y[i].qs[QK8_1/2 + j];
        }

        y[i].s = sum*d;
    }
}
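// q8_1 differs from q8_0 by also storing s = d * sum(q). When a q8_1 row is
// dotted against an offset format such as q4_1 (x ~= d4*q4 + m), the cross
// term m * sum(d8*q8) reduces to m * s, so the per-block sum does not have
// to be recomputed inside the hot dot-product loop.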
void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        int32x4_t accv = vdupq_n_s32(0);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);

            accv = vaddq_s32(accv, vi);
        }

        y[i].s = d * vaddvq_s32(accv);
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        v128_t accv = wasm_i32x4_splat(0);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);

            accv = wasm_i32x4_add(accv, vi);
        }

        y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
                      wasm_i32x4_extract_lane(accv, 1) +
                      wasm_i32x4_extract_lane(accv, 2) +
                      wasm_i32x4_extract_lane(accv, 3));
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = d;
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Compute the sum of the quants and set y[i].s
        y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));

        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // Since AVX lacks some of the necessary intrinsics,
        // we split the registers in half and call the SSE analogs of the AVX2 ones
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Compute the sum of the quants and set y[i].s
        const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
        const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
        y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#elif defined(__riscv_v_intrinsic)
    size_t vl = __riscv_vsetvl_e32m4(QK8_1);

    for (int i = 0; i < nb; i++) {
        // load elements
        vfloat32m4_t v_x   = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);

        vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
        vfloat32m1_t tmp   = __riscv_vfmv_v_f_f32m1(0.0, vl);
        vfloat32m1_t vmax  = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
        float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);

        // convert to integer
        vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
        vint8m1_t  vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);

        // store result
        __riscv_vse8_v_i8m1(y[i].qs , vs, vl);

        // compute sum for y[i].s
        vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
        vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);

        // set y[i].s
        int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
        y[i].s = sum*d;
    }
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_1_reference(x, y, k);
#endif
}
void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK8_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk; ++j) {
            y[i*qk + j] = x[i].qs[j]*d;
        }
    }
}
//
// 2-6 bit quantization in super-blocks
//

//
// ===================== Helper functions
//

static inline int nearest_int(float fval) {
    assert(fval <= 4194303.f);
    float val = fval + 12582912.f;
    int i; memcpy(&i, &val, sizeof(int));
    return (i & 0x007fffff) - 0x00400000;
}
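// nearest_int uses the classic magic-number trick: 12582912.0f = 1.5 * 2^23,
// so for |fval| < 2^22 the sum lands in the binade [2^23, 2^24), where the
// float's 23 mantissa bits hold fval + 2^22 rounded to the nearest integer
// (in the default round-to-nearest-even mode). Masking the mantissa and
// subtracting 0x00400000 (= 2^22) recovers round(fval) without a convert
// instruction. E.g. fval = 3.6f: 3.6 + 12582912 rounds to 12582916, whose
// mantissa bits are 0x400004, and 0x400004 - 0x400000 = 4.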
static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
        const float * restrict qw) {
    float max = 0;
    float amax = 0;
    for (int i = 0; i < n; ++i) {
        float ax = fabsf(x[i]);
        if (ax > amax) { amax = ax; max = x[i]; }
    }
    if (amax < 1e-30f) { // all zero
        for (int i = 0; i < n; ++i) {
            L[i] = 0;
        }
        return 0.f;
    }
    float iscale = -nmax / max;
    if (rmse_type == 0) {
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
        }
        return 1/iscale;
    }
    bool return_early = false;
    if (rmse_type < 0) {
        rmse_type = -rmse_type;
        return_early = true;
    }
    float sumlx = 0;
    float suml2 = 0;
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 0; i < n; ++i) {
#else
    for (int i = 0; i < n; ++i) {
#endif
        int l = nearest_int(iscale * x[i]);
        l = MAX(-nmax, MIN(nmax-1, l));
        L[i] = l + nmax;
        float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
        sumlx += w*x[i]*l;
        suml2 += w*l*l;
    }
    float scale = sumlx/suml2;
    if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
    float best = scale * sumlx;
    for (int is = -9; is <= 9; ++is) {
        if (is == 0) {
            continue;
        }
        iscale = -(nmax + 0.1f*is) / max;
        sumlx = suml2 = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            l = MAX(-nmax, MIN(nmax-1, l));
            float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
            sumlx += w*x[i]*l;
            suml2 += w*l*l;
        }
        if (suml2 > 0 && sumlx*sumlx > best*suml2) {
            for (int i = 0; i < n; ++i) {
                int l = nearest_int(iscale * x[i]);
                L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
            }
            scale = sumlx/suml2; best = scale*sumlx;
        }
    }
    return scale;
}
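// For a fixed assignment L, the weighted error sum(w*(x - scale*l)^2) is
// minimized by scale = sumlx/suml2, and the resulting error decreases
// monotonically in best = sumlx^2/suml2. make_qx_quants therefore scans 18
// perturbed inverse scales -(nmax + 0.1*is)/max, is in [-9, 9], and keeps
// whichever candidate assignment maximizes sumlx^2/suml2 under the chosen
// weighting (rmse_type selects w = x^2, 1, |x| or sqrt(|x|) unless explicit
// per-value weights qw are supplied).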
static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
    float max = 0;
    float amax = 0;
    for (int i = 0; i < n; ++i) {
        float ax = fabsf(x[i]);
        if (ax > amax) { amax = ax; max = x[i]; }
    }
    if (!amax) { // all zero
        for (int i = 0; i < n; ++i) { L[i] = 0; }
        return 0.f;
    }
    float iscale = -nmax / max;
    if (do_rmse) {
        float sumlx = 0;
        float suml2 = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            l = MAX(-nmax, MIN(nmax-1, l));
            L[i] = l;
            float w = x[i]*x[i];
            sumlx += w*x[i]*l;
            suml2 += w*l*l;
        }
        for (int itry = 0; itry < 5; ++itry) {
            int n_changed = 0;
            for (int i = 0; i < n; ++i) {
                float w = x[i]*x[i];
                float slx = sumlx - w*x[i]*L[i];
                if (slx > 0) {
                    float sl2 = suml2 - w*L[i]*L[i];
                    int new_l = nearest_int(x[i] * sl2 / slx);
                    new_l = MAX(-nmax, MIN(nmax-1, new_l));
                    if (new_l != L[i]) {
                        slx += w*x[i]*new_l;
                        sl2 += w*new_l*new_l;
                        if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
                            L[i] = new_l; sumlx = slx; suml2 = sl2;
                            ++n_changed;
                        }
                    }
                }
            }
            if (!n_changed) {
                break;
            }
        }
        for (int i = 0; i < n; ++i) {
            L[i] += nmax;
        }
        return sumlx / suml2;
    }
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale * x[i]);
        l = MAX(-nmax, MIN(nmax-1, l));
        L[i] = l + nmax;
    }
    return 1/iscale;
}
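// The do_rmse branch is a coordinate descent: for each value it tentatively
// removes x[i]'s contribution from sumlx/suml2, re-derives the best integer
// for the remaining implied scale, and accepts the change only if it raises
// the global figure of merit sumlx^2/suml2. Up to 5 sweeps are made, with an
// early stop once a full sweep changes nothing.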
static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
        int ntry, float alpha) {
    float min = x[0];
    float max = x[0];
    for (int i = 1; i < n; ++i) {
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
    }
    if (max == min) {
        for (int i = 0; i < n; ++i) L[i] = 0;
        *the_min = 0;
        return 0.f;
    }
    if (min > 0) min = 0;
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    for (int itry = 0; itry < ntry; ++itry) {
        float sumlx = 0; int suml2 = 0;
        bool did_change = false;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            if (l != L[i]) {
                L[i] = l;
                did_change = true;
            }
            sumlx += (x[i] - min)*l;
            suml2 += l*l;
        }
        scale = sumlx/suml2;
        float sum = 0;
        for (int i = 0; i < n; ++i) {
            sum += x[i] - scale*L[i];
        }
        min = alpha*min + (1 - alpha)*sum/n;
        if (min > 0) min = 0;
        iscale = 1/scale;
        if (!did_change) break;
    }
    *the_min = -min;
    return scale;
}
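// make_qkx1_quants alternates three closed-form updates: quantize against the
// current (scale, min), refit the scale by least squares, then move min
// toward the mean residual with an exponential blend controlled by alpha.
// min is clamped to be non-positive so the stored *the_min = -min is
// non-negative, matching the unsigned per-block minima of the k-quant
// formats.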
static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
        uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
        float rmin, float rdelta, int nstep, bool use_mad) {
    float min = x[0];
    float max = x[0];
    float sum_w = weights[0];
    float sum_x = sum_w * x[0];
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
        float w = weights[i];
        sum_w += w;
        sum_x += w * x[i];
    }
    if (min > 0) min = 0;
    if (max == min) {
        for (int i = 0; i < n; ++i) L[i] = 0;
        *the_min = -min;
        return 0.f;
    }
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    float best_mad = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale*(x[i] - min));
        L[i] = MAX(0, MIN(nmax, l));
        float diff = scale * L[i] + min - x[i];
        diff = use_mad ? fabsf(diff) : diff * diff;
        float w = weights[i];
        best_mad += w * diff;
    }
    if (nstep < 1) {
        *the_min = -min;
        return scale;
    }
    for (int is = 0; is <= nstep; ++is) {
        iscale = (rmin + rdelta*is + nmax)/(max - min);
        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            Laux[i] = l;
            float w = weights[i];
            sum_l += w*l;
            sum_l2 += w*l*l;
            sum_xl += w*l*x[i];
        }
        float D = sum_w * sum_l2 - sum_l * sum_l;
        if (D > 0) {
            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
            if (this_min > 0) {
                this_min = 0;
                this_scale = sum_xl / sum_l2;
            }
            float mad = 0;
            for (int i = 0; i < n; ++i) {
                float diff = this_scale * Laux[i] + this_min - x[i];
                diff = use_mad ? fabsf(diff) : diff * diff;
                float w = weights[i];
                mad += w * diff;
            }
            if (mad < best_mad) {
                for (int i = 0; i < n; ++i) {
                    L[i] = Laux[i];
                }
                best_mad = mad;
                scale = this_scale;
                min = this_min;
            }
        }
    }
    *the_min = -min;
    return scale;
}
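// For each candidate assignment Laux, make_qkx2_quants solves the 2x2
// weighted least-squares system for (scale, min) in closed form:
//   scale = (sum_w*sum_xl - sum_x*sum_l) / D
//   min   = (sum_l2*sum_x - sum_l*sum_xl) / D,   D = sum_w*sum_l2 - sum_l^2
// i.e. the normal equations of fitting x ~= scale*l + min under weights w.
// Candidates come from sweeping the inverse scale over
// (rmin + rdelta*is + nmax)/(max - min) for is = 0..nstep, and the fit with
// the smallest weighted error (absolute or squared, per use_mad) wins.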
#if QK_K == 256
static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
    if (j < 4) {
        *d = q[j] & 63; *m = q[j + 4] & 63;
    } else {
        *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        *m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
    }
}
#endif
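// Scale/min packing used by the 4-bit and 5-bit k-quants: eight 6-bit scales
// and eight 6-bit mins fit in 12 bytes. Scales 0-3 and mins 0-3 occupy the
// low 6 bits of bytes 0-3 and 4-7; for j >= 4 the low 4 bits live in the
// nibbles of bytes 8-11 while the high 2 bits are parked in the top 2 bits
// of bytes j-4 (scale) and j (min).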
//========================= 2-bit (de)-quantization

void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint8_t L[QK_K];
    uint8_t Laux[16];
    float   weights[16];
    float mins[QK_K/16];
    float scales[QK_K/16];

    const float q4scale = 15.f;

    for (int i = 0; i < nb; i++) {
        float max_scale = 0; // as we are subtracting the min, scales are always positive
        float max_min = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
            scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
            float scale = scales[j];
            if (scale > max_scale) {
                max_scale = scale;
            }
            float min = mins[j];
            if (min > max_min) {
                max_min = min;
            }
        }

        if (max_scale > 0) {
            float iscale = q4scale/max_scale;
            for (int j = 0; j < QK_K/16; ++j) {
                int l = nearest_int(iscale*scales[j]);
                y[i].scales[j] = l;
            }
            y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
        } else {
            for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
            y[i].d = GGML_FP32_TO_FP16(0.f);
        }
        if (max_min > 0) {
            float iscale = q4scale/max_min;
            for (int j = 0; j < QK_K/16; ++j) {
                int l = nearest_int(iscale*mins[j]);
                y[i].scales[j] |= (l << 4);
            }
            y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
        } else {
            y[i].dmin = GGML_FP32_TO_FP16(0.f);
        }
        for (int j = 0; j < QK_K/16; ++j) {
            const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int((x[16*j + ii] + dm)/d);
                l = MAX(0, MIN(3, l));
                L[16*j + ii] = l;
            }
        }

#if QK_K == 256
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }
#else
        for (int l = 0; l < 16; ++l) {
            y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
        }
#endif

        x += QK_K;
    }
}

void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float min = GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * q = x[i].qs;

#if QK_K == 256
        int is = 0;
        float dl, ml;
        for (int n = 0; n < QK_K; n += 128) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {
                uint8_t sc = x[i].scales[is++];
                dl = d * (sc & 0xF); ml = min * (sc >> 4);
                for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;

                sc = x[i].scales[is++];
                dl = d * (sc & 0xF); ml = min * (sc >> 4);
                for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;

                shift += 2;
            }
            q += 32;
        }
#else
        float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
        float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
        float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
        float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
        for (int l = 0; l < 16; ++l) {
            y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
            y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
            y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
            y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
        }
        y += QK_K;
#endif
    }
}

void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
    quantize_row_q2_K_reference(x, vy, k);
}

size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
    (void)hist; // TODO: collect histograms

    for (int j = 0; j < n; j += k) {
        block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
        quantize_row_q2_K_reference(src + j, y, k);
    }

    return (n/QK_K*sizeof(block_q2_K));
}
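// q2_K layout per 256-value super-block: a 2-bit quant for every value,
// packed four per byte in qs; one byte per 16-value sub-block in scales
// (low nibble = sub-block scale, high nibble = sub-block min); plus the fp16
// super-block factors d and dmin. A value decodes as d*sc*q - dmin*m.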
static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
        uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
        float rmin, float rdelta, int nstep, bool use_mad) {
    float min = x[0];
    float max = x[0];
    float sum_w = weights ? weights[0] : x[0]*x[0];
    float sum_x = sum_w * x[0];
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
        float w = weights ? weights[i] : x[i]*x[i];
        sum_w += w;
        sum_x += w * x[i];
    }
    if (min > 0) {
        min = 0;
    }
    if (max <= min) {
        memset(L, 0, n);
        *the_min = -min;
        return 0.f;
    }
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    float best_mad = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale*(x[i] - min));
        L[i] = MAX(0, MIN(nmax, l));
        float diff = scale * L[i] + min - x[i];
        diff = use_mad ? fabsf(diff) : diff*diff;
        float w = weights ? weights[i] : x[i]*x[i];
        best_mad += w * diff;
    }
    if (nstep < 1) {
        *the_min = -min;
        return scale;
    }
    for (int is = 0; is <= nstep; ++is) {
        iscale = (rmin + rdelta*is + nmax)/(max - min);
        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            Laux[i] = l;
            float w = weights ? weights[i] : x[i]*x[i];
            sum_l += w*l;
            sum_l2 += w*l*l;
            sum_xl += w*l*x[i];
        }
        float D = sum_w * sum_l2 - sum_l * sum_l;
        if (D > 0) {
            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
            if (this_min > 0) {
                this_min = 0;
                this_scale = sum_xl / sum_l2;
            }
            float mad = 0;
            for (int i = 0; i < n; ++i) {
                float diff = this_scale * Laux[i] + this_min - x[i];
                diff = use_mad ? fabsf(diff) : diff*diff;
                float w = weights ? weights[i] : x[i]*x[i];
                mad += w * diff;
            }
            if (mad < best_mad) {
                for (int i = 0; i < n; ++i) {
                    L[i] = Laux[i];
                }
                best_mad = mad;
                scale = this_scale;
                min = this_min;
            }
        }
    }
    *the_min = -min;
    return scale;
}
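// make_qkx3_quants is make_qkx2_quants with the weights pointer made
// optional: when it is NULL, the per-value weight defaults to x[i]^2.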
static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
    float max = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    if (!max) { // all zero
        for (int i = 0; i < n; ++i) { L[i] = 0; }
        return 0.f;
    }
    float iscale = nmax / max;
    for (int i = 0; i < n; ++i) {
        L[i] = nearest_int(iscale * x[i]);
    }
    float scale = 1/iscale;
    float best_mse = 0;
    for (int i = 0; i < n; ++i) {
        float diff = x[i] - scale*L[i];
        float w = quant_weights[i];
        best_mse += w*diff*diff;
    }
    for (int is = -4; is <= 4; ++is) {
        if (is == 0) continue;
        float iscale_is = (0.1f*is + nmax)/max;
        float scale_is = 1/iscale_is;
        float mse = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale_is*x[i]);
            l = MIN(nmax, l);
            float diff = x[i] - scale_is*l;
            float w = quant_weights[i];
            mse += w*diff*diff;
        }
        if (mse < best_mse) {
            best_mse = mse;
            iscale = iscale_is;
        }
    }
    float sumlx = 0;
    float suml2 = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale * x[i]);
        l = MIN(nmax, l);
        L[i] = l;
        float w = quant_weights[i];
        sumlx += w*x[i]*l;
        suml2 += w*l*l;
    }
    for (int itry = 0; itry < 5; ++itry) {
        int n_changed = 0;
        for (int i = 0; i < n; ++i) {
            float w = quant_weights[i];
            float slx = sumlx - w*x[i]*L[i];
            float sl2 = suml2 - w*L[i]*L[i];
            if (slx > 0 && sl2 > 0) {
                int new_l = nearest_int(x[i] * sl2 / slx);
                new_l = MIN(nmax, new_l);
                if (new_l != L[i]) {
                    slx += w*x[i]*new_l;
                    sl2 += w*new_l*new_l;
                    if (slx*slx*suml2 > sumlx*sumlx*sl2) {
                        L[i] = new_l; sumlx = slx; suml2 = sl2;
                        ++n_changed;
                    }
                }
            }
        }
        if (!n_changed) {
            break;
        }
    }
    return sumlx / suml2;
}
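// make_qp_quants is the non-negative counterpart of make_qx_quants, used to
// fit the per-sub-block scales and mins themselves (which are >= 0): a
// coarse grid search over 8 perturbed scales by weighted MSE, followed by
// the same coordinate-descent refinement on sumlx^2/suml2.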
static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
    GGML_ASSERT(quant_weights);
    assert(k % QK_K == 0);
    const int nb = k / QK_K;
    const bool requantize = true;

    uint8_t L[QK_K];
    uint8_t Laux[16];
    float mins[QK_K/16];
    float scales[QK_K/16];
    float sw[QK_K/16];
    float weight[QK_K/16];
    uint8_t Ls[QK_K/16], Lm[QK_K/16];

    for (int i = 0; i < nb; i++) {
        memset(sw, 0, QK_K/16*sizeof(float));
        float sumx2 = 0;
        for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
        float sigma2 = sumx2/QK_K;
        for (int j = 0; j < QK_K/16; ++j) {
            const float * restrict qw = quant_weights + QK_K * i + 16*j;
            for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
            for (int l = 0; l < 16; ++l) sw[j] += weight[l];
            scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
        }

        float dm  = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
        float mm  = make_qp_quants(QK_K/16, 15, mins,   Lm, sw);
        y[i].d    = GGML_FP32_TO_FP16(dm);
        y[i].dmin = GGML_FP32_TO_FP16(mm);
        dm        = GGML_FP16_TO_FP32(y[i].d);
        mm        = GGML_FP16_TO_FP32(y[i].dmin);

        for (int j = 0; j < QK_K/16; ++j) {
            y[i].scales[j] = Ls[j] | (Lm[j] << 4);
        }

        if (requantize) {
            for (int j = 0; j < QK_K/16; ++j) {
                const float d = dm * (y[i].scales[j] & 0xF);
                if (!d) continue;
                const float m = mm * (y[i].scales[j] >> 4);
                for (int ii = 0; ii < 16; ++ii) {
                    int l = nearest_int((x[16*j + ii] + m)/d);
                    l = MAX(0, MIN(3, l));
                    L[16*j + ii] = l;
                }
            }
        }

#if QK_K == 256
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }
#else
        for (int l = 0; l < 16; ++l) {
            y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
        }
#endif

        x += QK_K;
    }
}

size_t quantize_q2_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q2_K_reference(src, dst, nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int row = 0; row < nrow; ++row) {
            quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}
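// The weighted path above combines the caller-supplied per-weight
// importances (typically derived from an importance matrix) with a magnitude
// prior sqrtf(sigma2 + x^2), so large weights, and weights in high-variance
// sub-blocks, get more say when fitting the sub-block scales and mins.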
//========================= 3-bit (de)-quantization

void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    int8_t L[QK_K];
    float scales[QK_K / 16];

    for (int i = 0; i < nb; i++) {
        float max_scale = 0;
        float amax = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
            float scale = fabsf(scales[j]);
            if (scale > amax) {
                amax = scale; max_scale = scales[j];
            }
        }

#if QK_K == 256
        memset(y[i].scales, 0, 12);
        if (max_scale) {
            float iscale = -32.f/max_scale;
            for (int j = 0; j < QK_K/16; ++j) {
                int8_t l = nearest_int(iscale*scales[j]);
                l = MAX(-32, MIN(31, l)) + 32;
                if (j < 8) {
                    y[i].scales[j] = l & 0xF;
                } else {
                    y[i].scales[j-8] |= ((l & 0xF) << 4);
                }
                l >>= 4;
                y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
            }
            y[i].d = GGML_FP32_TO_FP16(1/iscale);
        } else {
            y[i].d = GGML_FP32_TO_FP16(0.f);
        }

        int8_t sc;
        for (int j = 0; j < QK_K/16; ++j) {
            sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
            sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
            float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-4, MIN(3, l));
                L[16*j + ii] = l + 4;
            }
        }
#else
        if (max_scale) {
            float iscale = -8.f/max_scale;
            for (int j = 0; j < QK_K/16; j+=2) {
                int l1 = nearest_int(iscale*scales[j]);
                l1 = 8 + MAX(-8, MIN(7, l1));
                int l2 = nearest_int(iscale*scales[j+1]);
                l2 = 8 + MAX(-8, MIN(7, l2));
                y[i].scales[j/2] = l1 | (l2 << 4);
            }
            y[i].d = GGML_FP32_TO_FP16(1/iscale);
        } else {
            for (int j = 0; j < QK_K/16; j+=2) {
                y[i].scales[j/2] = 0;
            }
            y[i].d = GGML_FP32_TO_FP16(0.f);
        }
        for (int j = 0; j < QK_K/16; ++j) {
            int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
            float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-4, MIN(3, l));
                L[16*j + ii] = l + 4;
            }
        }
#endif

        memset(y[i].hmask, 0, QK_K/8);
        // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc.
        int m = 0;
        uint8_t hm = 1;
        for (int j = 0; j < QK_K; ++j) {
            if (L[j] > 3) {
                y[i].hmask[m] |= hm;
                L[j] -= 4;
            }
            if (++m == QK_K/8) {
                m = 0; hm <<= 1;
            }
        }
#if QK_K == 256
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }
#else
        for (int l = 0; l < 16; ++l) {
            y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
        }
#endif

        x += QK_K;
    }
}

#if QK_K == 256
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    const uint32_t kmask1 = 0x03030303;
    const uint32_t kmask2 = 0x0f0f0f0f;

    uint32_t aux[4];
    const int8_t * scales = (const int8_t*)aux;

    for (int i = 0; i < nb; i++) {
        const float d_all = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q = x[i].qs;
        const uint8_t * restrict hm = x[i].hmask;
        uint8_t m = 1;

        memcpy(aux, x[i].scales, 12);
        uint32_t tmp = aux[2];
        aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
        aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
        aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
        aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);

        int is = 0;
        float dl;
        for (int n = 0; n < QK_K; n += 128) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {
                dl = d_all * (scales[is++] - 32);
                for (int l = 0; l < 16; ++l) {
                    *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
                }
                dl = d_all * (scales[is++] - 32);
                for (int l = 0; l < 16; ++l) {
                    *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
                }
                shift += 2;
                m <<= 1;
            }
            q += 32;
        }
    }
}
#else
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    assert(QK_K == 64);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const float d_all = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q = x[i].qs;
        const uint8_t * restrict hm = x[i].hmask;

        const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
        const float d2 = d_all * ((x[i].scales[0] >>  4) - 8);
        const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
        const float d4 = d_all * ((x[i].scales[1] >>  4) - 8);

        for (int l=0; l<8; ++l) {
            uint8_t h = hm[l];
            y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
            y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
            y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
            y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
            y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
            y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
            y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
            y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
        }
        y += QK_K;
    }
}
#endif

void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
    quantize_row_q3_K_reference(x, vy, k);
}

size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
    (void)hist; // TODO: collect histograms

    for (int j = 0; j < n; j += k) {
        block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
        quantize_row_q3_K_reference(src + j, y, k);
    }

    return (n/QK_K*sizeof(block_q3_K));
}

static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int n_per_row, const float * restrict quant_weights) {
#if QK_K != 256
    (void)quant_weights;
    quantize_row_q3_K_reference(x, y, n_per_row);
#else
    assert(n_per_row % QK_K == 0);
    const int nb = n_per_row / QK_K;

    int8_t L[QK_K];
    float scales[QK_K / 16];
    float weight[16];
    float sw[QK_K / 16];
    int8_t Ls[QK_K / 16];

    for (int i = 0; i < nb; i++) {
        float sumx2 = 0;
        for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
        float sigma2 = 2*sumx2/QK_K;

        for (int j = 0; j < QK_K/16; ++j) {
            if (quant_weights) {
                const float * qw = quant_weights + QK_K * i + 16*j;
                for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
            } else {
                for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
            }
            float sumw = 0;
            for (int l = 0; l < 16; ++l) sumw += weight[l];
            sw[j] = sumw;

            scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
        }

        memset(y[i].scales, 0, 12);

        float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
        for (int j = 0; j < QK_K/16; ++j) {
            int l = Ls[j];
            if (j < 8) {
                y[i].scales[j] = l & 0xF;
            } else {
                y[i].scales[j-8] |= ((l & 0xF) << 4);
            }
            l >>= 4;
            y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
        }
        y[i].d = GGML_FP32_TO_FP16(d_block);

        int8_t sc;
        for (int j = 0; j < QK_K/16; ++j) {
            sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
            sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
            float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-4, MIN(3, l));
                L[16*j + ii] = l + 4;
            }
        }

        memset(y[i].hmask, 0, QK_K/8);
        // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc.
        int m = 0;
        uint8_t hm = 1;
        for (int j = 0; j < QK_K; ++j) {
            if (L[j] > 3) {
                y[i].hmask[m] |= hm;
                L[j] -= 4;
            }
            if (++m == QK_K/8) {
                m = 0; hm <<= 1;
            }
        }

        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }

        x += QK_K;
    }
#endif
}

size_t quantize_q3_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q3_K_reference(src, dst, nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int row = 0; row < nrow; ++row) {
            quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}
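// q3_K packs, per 256-value super-block: the 2 low bits of each quant in qs,
// the third (high) bit of each quant in the 32-byte hmask, and sixteen 6-bit
// signed sub-block scales squeezed into 12 bytes (the low 4 bits in the
// nibbles of the first 8 bytes, the high 2 bits packed four per byte into
// the last 4), all tied together by the fp16 super-block scale d.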
  1869. // ====================== 4-bit (de)-quantization
void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint8_t L[QK_K];
    uint8_t Laux[32];
    float weights[32];
    float mins[QK_K/32];
    float scales[QK_K/32];

    for (int i = 0; i < nb; i++) {

        float max_scale = 0; // as we are deducting the min, scales are always positive
        float max_min = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
            float sum_x2 = 0;
            for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
            float av_x = sqrtf(sum_x2/32);
            for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
            scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
            float scale = scales[j];
            if (scale > max_scale) {
                max_scale = scale;
            }
            float min = mins[j];
            if (min > max_min) {
                max_min = min;
            }
        }
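
        // The 6-bit scales/mins are packed into y[i].scales as follows: bytes
        // 0..3 hold the full 6 bits of scales 0..3 and bytes 4..7 those of mins
        // 0..3; for sub-blocks 4..7 the low 4 bits of scale and min share byte
        // j+4 while the high 2 bits go into the top bits of bytes j-4 and j.
        // get_scale_min_k4() is the inverse; e.g. for sub-block 5 it computes
        // sc = (scales[9] & 0xF) | ((scales[1] >> 6) << 4).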
#if QK_K == 256
        float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
        float inv_min   = max_min   > 0 ? 63.f/max_min   : 0.f;
        for (int j = 0; j < QK_K/32; ++j) {
            uint8_t ls = nearest_int(inv_scale*scales[j]);
            uint8_t lm = nearest_int(inv_min*mins[j]);
            ls = MIN(63, ls);
            lm = MIN(63, lm);
            if (j < 4) {
                y[i].scales[j] = ls;
                y[i].scales[j+4] = lm;
            } else {
                y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
                y[i].scales[j-4] |= ((ls >> 4) << 6);
                y[i].scales[j-0] |= ((lm >> 4) << 6);
            }
        }
        y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
        y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);

        uint8_t sc, m;
        for (int j = 0; j < QK_K/32; ++j) {
            get_scale_min_k4(j, y[i].scales, &sc, &m);
            const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
            for (int ii = 0; ii < 32; ++ii) {
                int l = nearest_int((x[32*j + ii] + dm)/d);
                l = MAX(0, MIN(15, l));
                L[32*j + ii] = l;
            }
        }
#else
        const float s_factor = 15.f;
        float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
        float inv_min   = max_min   > 0 ? s_factor/max_min   : 0.f;
        int d1 = nearest_int(inv_scale*scales[0]);
        int m1 = nearest_int(inv_min*mins[0]);
        int d2 = nearest_int(inv_scale*scales[1]);
        int m2 = nearest_int(inv_min*mins[1]);
        y[i].scales[0] = d1 | (m1 << 4);
        y[i].scales[1] = d2 | (m2 << 4);
        y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
        y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);

        float sumlx = 0;
        int   suml2 = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            const uint8_t sd = y[i].scales[j] & 0xF;
            const uint8_t sm = y[i].scales[j] >> 4;
            const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
            if (!d) continue;
            const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
            for (int ii = 0; ii < 32; ++ii) {
                int l = nearest_int((x[32*j + ii] + m)/d);
                l = MAX(0, MIN(15, l));
                L[32*j + ii] = l;
                sumlx += (x[32*j + ii] + m)*l*sd;
                suml2 += l*l*sd*sd;
            }
        }
        if (suml2) {
            y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
        }
#endif
        uint8_t * q = y[i].qs;
        for (int j = 0; j < QK_K; j += 64) {
            for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
            q += 32;
        }

        x += QK_K;
    }
}
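
// Dequantization mirrors the packing above: within each group of 64 quants the
// low nibbles of q[0..31] reconstruct the first 32 values and the high nibbles
// the next 32, each value as d*sc*q - dmin*m with the per-sub-block (sc, m) pair.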
void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {

        const uint8_t * q = x[i].qs;

#if QK_K == 256

        const float d   = GGML_FP16_TO_FP32(x[i].d);
        const float min = GGML_FP16_TO_FP32(x[i].dmin);

        int is = 0;
        uint8_t sc, m;
        for (int j = 0; j < QK_K; j += 64) {
            get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
            const float d1 = d * sc; const float m1 = min * m;
            get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
            const float d2 = d * sc; const float m2 = min * m;
            for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
            for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
            q += 32; is += 2;
        }
#else
        const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
        const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
        const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
        const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
        for (int l = 0; l < 32; ++l) {
            y[l+ 0] = d1 * (q[l] & 0xF) - m1;
            y[l+32] = d2 * (q[l] >> 4) - m2;
        }
        y += QK_K;
#endif

    }
}
void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_q4_K * restrict y = vy;
    quantize_row_q4_K_reference(x, y, k);
}

size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
    assert(k % QK_K == 0);
    (void)hist; // TODO: collect histograms

    for (int j = 0; j < n; j += k) {
        block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
        quantize_row_q4_K_reference(src + j, y, k);
    }

    return (n/QK_K*sizeof(block_q4_K));
}
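
// Importance-weighted variant used when per-weight importance data (the
// "imatrix" in llama.cpp terminology) is available: each value gets the weight
// qw[l] * sqrt(sigma2 + x[l]^2), where sigma2 is twice the mean square of the
// block, so both high-magnitude and high-importance weights dominate the
// scale/min search.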
static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int n_per_row, const float * quant_weights) {
#if QK_K != 256
    (void)quant_weights;
    quantize_row_q4_K_reference(x, y, n_per_row);
#else
    assert(n_per_row % QK_K == 0);
    const int nb = n_per_row / QK_K;

    uint8_t L[QK_K];
    uint8_t Laux[32];
    uint8_t Ls[QK_K/32];
    uint8_t Lm[QK_K/32];
    float   weights[32];
    float   sw[QK_K/32];
    float   mins[QK_K/32];
    float   scales[QK_K/32];

    for (int i = 0; i < nb; i++) {

        float sum_x2 = 0;
        for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
        float sigma2 = 2*sum_x2/QK_K;
        float av_x = sqrtf(sigma2);

        for (int j = 0; j < QK_K/32; ++j) {
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*i + 32*j;
                for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
            } else {
                for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
            }
            float sumw = 0;
            for (int l = 0; l < 32; ++l) sumw += weights[l];
            sw[j] = sumw;
            scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
        }

        float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
        float m_block = make_qp_quants(QK_K/32, 63, mins,   Lm, sw);
        for (int j = 0; j < QK_K/32; ++j) {
            uint8_t ls = Ls[j];
            uint8_t lm = Lm[j];
            if (j < 4) {
                y[i].scales[j] = ls;
                y[i].scales[j+4] = lm;
            } else {
                y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
                y[i].scales[j-4] |= ((ls >> 4) << 6);
                y[i].scales[j-0] |= ((lm >> 4) << 6);
            }
        }
        y[i].d = GGML_FP32_TO_FP16(d_block);
        y[i].dmin = GGML_FP32_TO_FP16(m_block);

        uint8_t sc, m;
        for (int j = 0; j < QK_K/32; ++j) {
            get_scale_min_k4(j, y[i].scales, &sc, &m);
            const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
            for (int ii = 0; ii < 32; ++ii) {
                int l = nearest_int((x[32*j + ii] + dm)/d);
                l = MAX(0, MIN(15, l));
                L[32*j + ii] = l;
            }
        }
        uint8_t * q = y[i].qs;
        for (int j = 0; j < QK_K; j += 64) {
            for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
            q += 32;
        }

        x += QK_K;
    }
#endif
}
size_t quantize_q4_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q4_K_reference(src, dst, nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int row = 0; row < nrow; ++row) {
            quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}

// ====================== 5-bit (de)-quantization
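
// block_q5_K extends the q4_K layout with a QK_K/8-byte high-bit plane (qh):
// the low 4 bits of each quant live in qs and the 5th bit in qh, giving 32
// levels per sub-block with the same 6-bit scale/min packing as q4_K.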
void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

#if QK_K == 256
    uint8_t L[QK_K];
    float mins[QK_K/32];
    float scales[QK_K/32];
    float weights[32];
    uint8_t Laux[32];
#else
    int8_t L[QK_K];
    float scales[QK_K/16];
#endif

    for (int i = 0; i < nb; i++) {

#if QK_K == 256

        float max_scale = 0; // as we are deducting the min, scales are always positive
        float max_min = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
            float sum_x2 = 0;
            for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
            float av_x = sqrtf(sum_x2/32);
            for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
            scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
            float scale = scales[j];
            if (scale > max_scale) {
                max_scale = scale;
            }
            float min = mins[j];
            if (min > max_min) {
                max_min = min;
            }
        }

        float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
        float inv_min   = max_min   > 0 ? 63.f/max_min   : 0.f;
        for (int j = 0; j < QK_K/32; ++j) {
            uint8_t ls = nearest_int(inv_scale*scales[j]);
            uint8_t lm = nearest_int(inv_min*mins[j]);
            ls = MIN(63, ls);
            lm = MIN(63, lm);
            if (j < 4) {
                y[i].scales[j] = ls;
                y[i].scales[j+4] = lm;
            } else {
                y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
                y[i].scales[j-4] |= ((ls >> 4) << 6);
                y[i].scales[j-0] |= ((lm >> 4) << 6);
            }
        }
        y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
        y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);

        uint8_t sc, m;
        for (int j = 0; j < QK_K/32; ++j) {
            get_scale_min_k4(j, y[i].scales, &sc, &m);
            const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
            for (int ii = 0; ii < 32; ++ii) {
                int l = nearest_int((x[32*j + ii] + dm)/d);
                l = MAX(0, MIN(31, l));
                L[32*j + ii] = l;
            }
        }
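
        // Split each 5-bit quant into a low nibble (packed pairwise into ql:
        // first 32 values of a 64-group in the low nibbles, next 32 in the high
        // nibbles) and a high bit collected into qh, using two bit positions
        // per group of 64 quants (masks m1 and m2, shifted left by 2 per group).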
        uint8_t * restrict qh = y[i].qh;
        uint8_t * restrict ql = y[i].qs;
        memset(qh, 0, QK_K/8);

        uint8_t m1 = 1, m2 = 2;
        for (int n = 0; n < QK_K; n += 64) {
            for (int j = 0; j < 32; ++j) {
                int l1 = L[n + j];
                if (l1 > 15) {
                    l1 -= 16; qh[j] |= m1;
                }
                int l2 = L[n + j + 32];
                if (l2 > 15) {
                    l2 -= 16; qh[j] |= m2;
                }
                ql[j] = l1 | (l2 << 4);
            }
            m1 <<= 2; m2 <<= 2;
            ql += 32;
        }
#else
        float max_scale = 0, amax = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL);
            float abs_scale = fabsf(scales[j]);
            if (abs_scale > amax) {
                amax = abs_scale;
                max_scale = scales[j];
            }
        }

        float iscale = -128.f/max_scale;
        for (int j = 0; j < QK_K/16; ++j) {
            int l = nearest_int(iscale*scales[j]);
            y[i].scales[j] = MAX(-128, MIN(127, l));
        }
        y[i].d = GGML_FP32_TO_FP16(1/iscale);

        for (int j = 0; j < QK_K/16; ++j) {
            const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
            if (!d) continue;
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-16, MIN(15, l));
                L[16*j + ii] = l + 16;
            }
        }

        uint8_t * restrict qh = y[i].qh;
        uint8_t * restrict ql = y[i].qs;
        memset(qh, 0, QK_K/8);

        for (int j = 0; j < 32; ++j) {
            int jm = j%8;
            int is = j/8;
            int l1 = L[j];
            if (l1 > 15) {
                l1 -= 16; qh[jm] |= (1 << is);
            }
            int l2 = L[j + 32];
            if (l2 > 15) {
                l2 -= 16; qh[jm] |= (1 << (4 + is));
            }
            ql[j] = l1 | (l2 << 4);
        }
#endif

        x += QK_K;
    }
}
void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {

        const uint8_t * ql = x[i].qs;
        const uint8_t * qh = x[i].qh;

#if QK_K == 256

        const float d   = GGML_FP16_TO_FP32(x[i].d);
        const float min = GGML_FP16_TO_FP32(x[i].dmin);

        int is = 0;
        uint8_t sc, m;
        uint8_t u1 = 1, u2 = 2;
        for (int j = 0; j < QK_K; j += 64) {
            get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
            const float d1 = d * sc; const float m1 = min * m;
            get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
            const float d2 = d * sc; const float m2 = min * m;
            for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
            for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
            ql += 32; is += 2;
            u1 <<= 2; u2 <<= 2;
        }
#else
        float d = GGML_FP16_TO_FP32(x[i].d);
        const int8_t * restrict s = x[i].scales;
        for (int l = 0; l < 8; ++l) {
            y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
            y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
            y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
            y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
            y[l+32] = d * s[2] * ((ql[l+ 0] >>  4) - (qh[l] & 0x10 ? 0 : 16));
            y[l+40] = d * s[2] * ((ql[l+ 8] >>  4) - (qh[l] & 0x20 ? 0 : 16));
            y[l+48] = d * s[3] * ((ql[l+16] >>  4) - (qh[l] & 0x40 ? 0 : 16));
            y[l+56] = d * s[3] * ((ql[l+24] >>  4) - (qh[l] & 0x80 ? 0 : 16));
        }
        y += QK_K;
#endif
    }
}
void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_q5_K * restrict y = vy;
    quantize_row_q5_K_reference(x, y, k);
}

size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
    assert(k % QK_K == 0);
    (void)hist; // TODO: collect histograms

    for (int j = 0; j < n; j += k) {
        block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
        quantize_row_q5_K_reference(src + j, y, k);
    }

    return (n/QK_K*sizeof(block_q5_K));
}
static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int n_per_row, const float * quant_weights) {
#if QK_K != 256
    (void)quant_weights;
    quantize_row_q5_K_reference(x, y, n_per_row);
#else
    assert(n_per_row % QK_K == 0);
    const int nb = n_per_row / QK_K;

    uint8_t L[QK_K];
    uint8_t Laux[32];
    uint8_t Ls[QK_K/32];
    uint8_t Lm[QK_K/32];
    float   mins[QK_K/32];
    float   scales[QK_K/32];
    float   sw[QK_K/32];
    float   weights[32];

    for (int i = 0; i < nb; i++) {

        float sum_x2 = 0;
        for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
        float sigma2 = 2*sum_x2/QK_K;
        float av_x = sqrtf(sigma2);

        for (int j = 0; j < QK_K/32; ++j) {
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*i + 32*j;
                for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
            } else {
                for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
            }
            float sumw = 0;
            for (int l = 0; l < 32; ++l) sumw += weights[l];
            sw[j] = sumw;
            scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
        }

        float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
        float m_block = make_qp_quants(QK_K/32, 63, mins,   Lm, sw);

        for (int j = 0; j < QK_K/32; ++j) {
            uint8_t ls = Ls[j];
            uint8_t lm = Lm[j];
            ls = MIN(63, ls);
            lm = MIN(63, lm);
            if (j < 4) {
                y[i].scales[j] = ls;
                y[i].scales[j+4] = lm;
            } else {
                y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
                y[i].scales[j-4] |= ((ls >> 4) << 6);
                y[i].scales[j-0] |= ((lm >> 4) << 6);
            }
        }
        y[i].d = GGML_FP32_TO_FP16(d_block);
        y[i].dmin = GGML_FP32_TO_FP16(m_block);

        uint8_t sc, m;
        for (int j = 0; j < QK_K/32; ++j) {
            get_scale_min_k4(j, y[i].scales, &sc, &m);
            const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
            for (int ii = 0; ii < 32; ++ii) {
                int l = nearest_int((x[32*j + ii] + dm)/d);
                l = MAX(0, MIN(31, l));
                L[32*j + ii] = l;
            }
        }

        uint8_t * restrict qh = y[i].qh;
        uint8_t * restrict ql = y[i].qs;
        memset(qh, 0, QK_K/8);

        uint8_t m1 = 1, m2 = 2;
        for (int n = 0; n < QK_K; n += 64) {
            for (int j = 0; j < 32; ++j) {
                int l1 = L[n + j];
                if (l1 > 15) {
                    l1 -= 16; qh[j] |= m1;
                }
                int l2 = L[n + j + 32];
                if (l2 > 15) {
                    l2 -= 16; qh[j] |= m2;
                }
                ql[j] = l1 | (l2 << 4);
            }
            m1 <<= 2; m2 <<= 2;
            ql += 32;
        }

        x += QK_K;
    }
#endif
}
size_t quantize_q5_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q5_K_reference(src, dst, nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int row = 0; row < nrow; ++row) {
            quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}

// ====================== 6-bit (de)-quantization
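
// block_q6_K stores 6-bit quants as a 4-bit low part in ql plus a 2-bit high
// part in qh, with QK_K/16 signed int8 sub-block scales and one fp16
// super-scale d; for QK_K == 256 that is 210 bytes per block, i.e. 6.5625 bits
// per weight.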
void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    int8_t L[QK_K];
    float   scales[QK_K/16];

    for (int i = 0; i < nb; i++) {

        float max_scale = 0;
        float max_abs_scale = 0;

        for (int ib = 0; ib < QK_K/16; ++ib) {

            const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
            scales[ib] = scale;

            const float abs_scale = fabsf(scale);
            if (abs_scale > max_abs_scale) {
                max_abs_scale = abs_scale;
                max_scale = scale;
            }
        }

        if (!max_abs_scale) {
            memset(&y[i], 0, sizeof(block_q6_K));
            y[i].d = GGML_FP32_TO_FP16(0.f);
            x += QK_K;
            continue;
        }

        float iscale = -128.f/max_scale;
        y[i].d = GGML_FP32_TO_FP16(1/iscale);
        for (int ib = 0; ib < QK_K/16; ++ib) {
            y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
        }

        for (int j = 0; j < QK_K/16; ++j) {
            float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-32, MIN(31, l));
                L[16*j + ii] = l + 32;
            }
        }

        uint8_t * restrict ql = y[i].ql;
        uint8_t * restrict qh = y[i].qh;
#if QK_K == 256
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                const uint8_t q1 = L[j + l +  0] & 0xF;
                const uint8_t q2 = L[j + l + 32] & 0xF;
                const uint8_t q3 = L[j + l + 64] & 0xF;
                const uint8_t q4 = L[j + l + 96] & 0xF;
                ql[l+ 0] = q1 | (q3 << 4);
                ql[l+32] = q2 | (q4 << 4);
                qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
            }
            ql += 64;
            qh += 32;
        }
#else
        for (int l = 0; l < 32; ++l) {
            const uint8_t q1 = L[l +  0] & 0xF;
            const uint8_t q2 = L[l + 32] & 0xF;
            ql[l] = q1 | (q2 << 4);
        }
        for (int l = 0; l < 16; ++l) {
            qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
        }
#endif

        x += QK_K;
    }
}
void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {

        const float d = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict ql = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict sc = x[i].scales;

#if QK_K == 256
        for (int n = 0; n < QK_K; n += 128) {
            for (int l = 0; l < 32; ++l) {
                int is = l/16;
                const int8_t q1 = (int8_t)((ql[l +  0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
                const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
                const int8_t q3 = (int8_t)((ql[l +  0]  >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
                const int8_t q4 = (int8_t)((ql[l + 32]  >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
                y[l +  0] = d * sc[is + 0] * q1;
                y[l + 32] = d * sc[is + 2] * q2;
                y[l + 64] = d * sc[is + 4] * q3;
                y[l + 96] = d * sc[is + 6] * q4;
            }
            y  += 128;
            ql += 64;
            qh += 32;
            sc += 8;
        }
#else
        for (int l = 0; l < 16; ++l) {
            const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
            const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
            const int8_t q3 = (int8_t)((ql[l+ 0]  >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
            const int8_t q4 = (int8_t)((ql[l+16]  >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
            y[l+ 0] = d * sc[0] * q1;
            y[l+16] = d * sc[1] * q2;
            y[l+32] = d * sc[2] * q3;
            y[l+48] = d * sc[3] * q4;
        }
        y += 64;
#endif

    }
}
void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_q6_K * restrict y = vy;
    quantize_row_q6_K_reference(x, y, k);
}

size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK_K == 0);
    (void)hist; // TODO: collect histograms

    for (int j = 0; j < n; j += k) {
        block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
        quantize_row_q6_K_reference(src + j, y, k);
    }

    return (n/QK_K*sizeof(block_q6_K));
}
static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int n_per_row, const float * quant_weights) {
#if QK_K != 256
    (void)quant_weights;
    quantize_row_q6_K_reference(x, y, n_per_row);
#else
    assert(n_per_row % QK_K == 0);
    const int nb = n_per_row / QK_K;

    int8_t L[QK_K];
    float   scales[QK_K/16];
    //float   weights[16];

    for (int i = 0; i < nb; i++) {

        //float sum_x2 = 0;
        //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
        //float sigma2 = sum_x2/QK_K;

        float max_scale = 0;
        float max_abs_scale = 0;

        for (int ib = 0; ib < QK_K/16; ++ib) {

            float scale;
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*i + 16*ib;
                //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
                //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
                scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
            } else {
                scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
            }
            scales[ib] = scale;

            const float abs_scale = fabsf(scale);
            if (abs_scale > max_abs_scale) {
                max_abs_scale = abs_scale;
                max_scale = scale;
            }
        }

        if (!max_abs_scale) {
            memset(&y[i], 0, sizeof(block_q6_K));
            y[i].d = GGML_FP32_TO_FP16(0.f);
            x += QK_K;
            continue;
        }

        float iscale = -128.f/max_scale;
        y[i].d = GGML_FP32_TO_FP16(1/iscale);
        for (int ib = 0; ib < QK_K/16; ++ib) {
            y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
        }

        for (int j = 0; j < QK_K/16; ++j) {
            float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-32, MIN(31, l));
                L[16*j + ii] = l + 32;
            }
        }

        uint8_t * restrict ql = y[i].ql;
        uint8_t * restrict qh = y[i].qh;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                const uint8_t q1 = L[j + l +  0] & 0xF;
                const uint8_t q2 = L[j + l + 32] & 0xF;
                const uint8_t q3 = L[j + l + 64] & 0xF;
                const uint8_t q4 = L[j + l + 96] & 0xF;
                ql[l+ 0] = q1 | (q3 << 4);
                ql[l+32] = q2 | (q4 << 4);
                qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
            }
            ql += 64;
            qh += 32;
        }

        x += QK_K;
    }
#endif
}
size_t quantize_q6_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q6_K_reference(src, dst, nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int row = 0; row < nrow; ++row) {
            quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}
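
// The remaining *_impl functions add importance-weighted quantization to the
// legacy formats (q4_0/q4_1/q5_0/q5_1). Note that here sigma2 is computed over
// the whole row rather than per super-block before the per-block scale search.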
static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int n_per_row, const float * quant_weights) {
    static_assert(QK4_0 == 32, "QK4_0 must be 32");

    if (!quant_weights) {
        quantize_row_q4_0_reference(x, y, n_per_row);
        return;
    }

    float weight[QK4_0];
    int8_t L[QK4_0];

    float sum_x2 = 0;
    for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
    float sigma2 = sum_x2/n_per_row;

    const int nb = n_per_row/QK4_0;
    for (int ib = 0; ib < nb; ++ib) {
        const float * xb = x + QK4_0 * ib;
        const float * qw = quant_weights + QK4_0 * ib;
        for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
        float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
        y[ib].d = GGML_FP32_TO_FP16(d);
        for (int j = 0; j < 16; ++j) {
            y[ib].qs[j] = L[j] | (L[j+16] << 4);
        }
    }
}

size_t quantize_q4_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    if (!quant_weights) {
        return ggml_quantize_q4_0(src, dst, nrow*n_per_row, n_per_row, hist);
    }
    size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += row_size;
    }
    return nrow * row_size;
}
static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int n_per_row, const float * quant_weights) {
    static_assert(QK4_1 == 32, "QK4_1 must be 32");

    if (!quant_weights) {
        quantize_row_q4_1_reference(x, y, n_per_row);
        return;
    }

    float weight[QK4_1];
    uint8_t L[QK4_1], Laux[QK4_1];

    float sum_x2 = 0;
    for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
    float sigma2 = sum_x2/n_per_row;

    const int nb = n_per_row/QK4_1;
    for (int ib = 0; ib < nb; ++ib) {
        const float * xb = x + QK4_1 * ib;
        const float * qw = quant_weights + QK4_1 * ib;
        for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
        float min;
        float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
        y[ib].d = GGML_FP32_TO_FP16(d);
        y[ib].m = GGML_FP32_TO_FP16(-min);
        for (int j = 0; j < 16; ++j) {
            y[ib].qs[j] = L[j] | (L[j+16] << 4);
        }
    }
}

size_t quantize_q4_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    if (!quant_weights) {
        return ggml_quantize_q4_1(src, dst, nrow*n_per_row, n_per_row, hist);
    }
    size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += row_size;
    }
    return nrow * row_size;
}
static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int n_per_row, const float * quant_weights) {
    static_assert(QK5_0 == 32, "QK5_0 must be 32");

    if (!quant_weights) {
        quantize_row_q5_0_reference(x, y, n_per_row);
        return;
    }

    float weight[QK5_0];
    int8_t L[QK5_0];

    float sum_x2 = 0;
    for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
    float sigma2 = sum_x2/n_per_row;

    const int nb = n_per_row/QK5_0;
    for (int ib = 0; ib < nb; ++ib) {
        const float * xb = x + QK5_0 * ib;
        const float * qw = quant_weights + QK5_0 * ib;
        for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
        float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
        y[ib].d = GGML_FP32_TO_FP16(d);
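
        // The 5th bits of all 32 quants are packed into one uint32: bit j holds
        // the high bit of quant j (low-nibble group) and bit j+16 that of quant
        // j+16 (high-nibble group). E.g. if L[3] >= 16, bit 3 of qh is set; if
        // L[19] >= 16, bit 19 is set.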
        uint32_t qh = 0;
        for (int j = 0; j < 16; ++j) {
            const uint8_t xi0 = L[j];
            const uint8_t xi1 = L[j+16];
            y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
        }
        memcpy(&y[ib].qh, &qh, sizeof(qh));
    }
}

size_t quantize_q5_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    if (!quant_weights) {
        return ggml_quantize_q5_0(src, dst, nrow*n_per_row, n_per_row, hist);
    }
    size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += row_size;
    }
    return nrow * row_size;
}
static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int n_per_row, const float * quant_weights) {
    static_assert(QK5_1 == 32, "QK5_1 must be 32");

    if (!quant_weights) {
        quantize_row_q5_1_reference(x, y, n_per_row);
        return;
    }

    float weight[QK5_1];
    uint8_t L[QK5_1], Laux[QK5_1];

    float sum_x2 = 0;
    for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
    float sigma2 = sum_x2/n_per_row;

    const int nb = n_per_row/QK5_1;
    for (int ib = 0; ib < nb; ++ib) {
        const float * xb = x + QK5_1 * ib;
        const float * qw = quant_weights + QK5_1 * ib;
        for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
        float min;
        float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
        y[ib].d = GGML_FP32_TO_FP16(d);
        y[ib].m = GGML_FP32_TO_FP16(-min);

        uint32_t qh = 0;
        for (int j = 0; j < 16; ++j) {
            const uint8_t xi0 = L[j];
            const uint8_t xi1 = L[j+16];
            y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
        }
        memcpy(&y[ib].qh, &qh, sizeof(qh));
    }
}
size_t quantize_q5_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    if (!quant_weights) {
        return ggml_quantize_q5_1(src, dst, nrow*n_per_row, n_per_row, hist);
    }
    size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += row_size;
    }
    return nrow * row_size;
}

// ====================== "True" 2-bit (de)-quantization
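
// The iq2/iq3 types quantize groups of 8 values against a fixed codebook
// ("grid"): each uint64 entry below encodes 8 byte-sized magnitudes, and the
// signs of a group come from a 7-bit index into ksigns_iq2xs, tested bit by bit
// with kmask_iq2xs. A small per-sub-block scale completes the encoding.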
static const uint64_t iq2xxs_grid[256] = {
    0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
    0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808,
    0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819,
    0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819,
    0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b,
    0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808,
    0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08,
    0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b,
    0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819,
    0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08,
    0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808,
    0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08,
    0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808,
    0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808,
    0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919,
    0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819,
    0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08,
    0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908,
    0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819,
    0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808,
    0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808,
    0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908,
    0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808,
    0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08,
    0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819,
    0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819,
    0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819,
    0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908,
    0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19,
    0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819,
    0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b,
    0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808,
    0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908,
    0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08,
    0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08,
    0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908,
    0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819,
    0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808,
    0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808,
    0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19,
    0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819,
    0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919,
    0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b,
    0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08,
    0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808,
    0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908,
    0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b,
    0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819,
    0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08,
    0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08,
    0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808,
    0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b,
    0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b,
    0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908,
    0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819,
    0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808,
    0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908,
    0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b,
    0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808,
    0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b,
    0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b,
    0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808,
    0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19,
    0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908,
};
static const uint64_t iq2xs_grid[512] = {
    0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
    0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
    0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
    0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
    0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
    0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808,
    0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819,
    0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819,
    0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808,
    0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b,
    0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b,
    0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908,
    0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908,
    0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919,
    0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808,
    0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919,
    0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908,
    0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b,
    0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908,
    0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08,
    0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808,
    0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808,
    0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819,
    0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908,
    0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819,
    0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808,
    0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b,
    0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819,
    0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819,
    0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808,
    0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908,
    0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19,
    0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b,
    0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b,
    0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919,
    0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808,
    0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819,
    0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819,
    0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b,
    0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908,
    0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808,
    0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819,
    0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808,
    0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919,
    0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808,
    0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808,
    0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908,
    0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908,
    0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808,
    0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b,
    0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819,
    0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919,
    0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908,
    0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808,
    0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908,
    0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919,
    0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08,
    0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19,
    0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b,
    0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b,
    0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808,
    0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08,
    0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b,
    0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908,
    0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b,
    0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908,
    0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08,
    0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808,
    0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808,
    0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08,
    0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819,
    0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919,
    0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808,
    0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808,
    0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819,
    0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819,
    0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908,
    0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908,
    0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b,
    0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908,
    0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908,
    0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908,
    0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808,
    0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819,
    0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819,
    0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819,
    0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808,
    0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b,
    0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819,
    0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819,
    0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08,
    0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808,
    0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19,
    0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919,
    0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808,
    0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19,
    0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b,
    0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808,
    0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b,
    0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b,
    0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08,
    0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b,
    0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808,
    0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819,
    0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808,
    0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808,
    0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08,
    0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b,
    0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19,
    0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08,
    0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919,
    0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08,
    0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08,
    0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908,
    0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908,
    0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b,
    0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908,
    0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808,
    0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b,
    0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808,
    0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808,
    0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19,
    0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08,
    0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808,
    0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b,
    0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808,
    0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b,
    0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b,
};
static const uint32_t iq3xxs_grid[256] = {
    0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
    0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
    0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
    0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
    0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
    0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
    0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
    0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
    0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
    0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
    0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
    0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
    0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
    0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
    0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
    0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
    0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
    0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
    0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
    0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
    0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
    0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
    0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
    0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
    0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
    0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
    0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
    0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
    0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
    0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
    0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
    0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
};
static const uint8_t ksigns_iq2xs[128] = {
      0, 129, 130,   3, 132,   5,   6, 135, 136,   9,  10, 139,  12, 141, 142,  15,
    144,  17,  18, 147,  20, 149, 150,  23,  24, 153, 154,  27, 156,  29,  30, 159,
    160,  33,  34, 163,  36, 165, 166,  39,  40, 169, 170,  43, 172,  45,  46, 175,
     48, 177, 178,  51, 180,  53,  54, 183, 184,  57,  58, 187,  60, 189, 190,  63,
    192,  65,  66, 195,  68, 197, 198,  71,  72, 201, 202,  75, 204,  77,  78, 207,
     80, 209, 210,  83, 212,  85,  86, 215, 216,  89,  90, 219,  92, 221, 222,  95,
     96, 225, 226,  99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111,
    240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
};

static const uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128};
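
// For iq2_xxs each 32-value sub-block is described by two uint32 words: the
// first holds four 8-bit grid indices (one per group of 8 values) and the
// second packs four 7-bit sign-pattern indices, decoded through
// ksigns_iq2xs/kmask_iq2xs, plus a 4-bit scale in the top bits, applied as
// d * (0.5f + scale) * 0.25f.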
void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
            const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
                const uint8_t  signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
                for (int j = 0; j < 8; ++j) {
                    y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
                }
                y += 8;
            }
        }
    }
}
// ====================== 2.3125 bpw (de)-quantization

void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    float db[2];

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
            db[1] = d * (0.5f + (x[i].scales[ib32] >>  4)) * 0.25f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
                const uint8_t  signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
                }
                y += 8;
            }
        }
    }
}

// ====================== 3.0625 bpw (de)-quantization
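// The first QK_K/4 bytes of x[i].qs are 8-bit indices into iq3xxs_grid (two 4-value grid
// entries per group of 8 weights); the remaining bytes carry, per 32 weights, one 32-bit
// word with four 7-bit ksigns_iq2xs indices and a 4-bit scale in the top nibble.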
void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint32_t aux32;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint8_t * scales_and_signs = qs + QK_K/4;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
            const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
                for (int j = 0; j < 4; ++j) {
                    y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
                    y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
                }
                y += 8;
            }
            qs += 8;
        }
    }
}

//===================================== Q8_K ==============================================
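// Q8_K is the 8-bit format used for the activations in the K-quant and IQ dot products:
// one float scale per super-block of QK_K values, plus the per-16 sums of the quants
// (bsums) that the dot products use to fold in the per-block minima cheaply.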
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {

        float max  = 0;
        float amax = 0;
        for (int j = 0; j < QK_K; ++j) {
            float ax = fabsf(x[j]);
            if (ax > amax) {
                amax = ax; max = x[j];
            }
        }
        if (!amax) {
            y[i].d = 0;
            memset(y[i].qs, 0, QK_K);
            x += QK_K;
            continue;
        }
        //const float iscale = -128.f/max;
        // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward
        const float iscale = -127.f/max;
        for (int j = 0; j < QK_K; ++j) {
            int v = nearest_int(iscale*x[j]);
            y[i].qs[j] = MIN(127, v);
        }
        for (int j = 0; j < QK_K/16; ++j) {
            int sum = 0;
            for (int ii = 0; ii < 16; ++ii) {
                sum += y[i].qs[j*16 + ii];
            }
            y[i].bsums[j] = sum;
        }
        y[i].d = 1/iscale;
        x += QK_K;
    }
}

void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        for (int j = 0; j < QK_K; ++j) {
            *y++ = x[i].d * x[i].qs[j];
        }
    }
}

void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
    quantize_row_q8_K_reference(x, y, k);
}

//===================================== Dot products =================================

//
// Helper functions
//
#if __AVX__ || __AVX2__ || __AVX512F__

// shuffles to pick the required scales in dot products
static inline __m256i get_scale_shuffle_q3k(int i) {
    static const uint8_t k_shuffle[128] = {
         0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
         4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
         8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
        12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
    };
    return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
}
static inline __m256i get_scale_shuffle_k4(int i) {
    static const uint8_t k_shuffle[256] = {
         0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
         2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,  2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
         4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,  4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
         6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,  6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
         8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,  8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
        10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
        12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
        14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
    };
    return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
}
static inline __m128i get_scale_shuffle(int i) {
    static const uint8_t k_shuffle[128] = {
         0, 0, 0, 0, 0, 0, 0, 0,  1, 1, 1, 1, 1, 1, 1, 1,
         2, 2, 2, 2, 2, 2, 2, 2,  3, 3, 3, 3, 3, 3, 3, 3,
         4, 4, 4, 4, 4, 4, 4, 4,  5, 5, 5, 5, 5, 5, 5, 5,
         6, 6, 6, 6, 6, 6, 6, 6,  7, 7, 7, 7, 7, 7, 7, 7,
         8, 8, 8, 8, 8, 8, 8, 8,  9, 9, 9, 9, 9, 9, 9, 9,
        10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
        12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
        14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
    };
    return _mm_loadu_si128((const __m128i*)k_shuffle + i);
}
#endif
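
// All ggml_vec_dot_* routines below reduce n quantized values from vx and vy into s.
// Where supported (nrc == 2 with __ARM_FEATURE_MATMUL_INT8), bx and by are the byte
// strides to a second row of x and y, and bs is the stride (in floats) between the two
// results written to s; otherwise nrc == 1 and bx, by, bs are unused.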
void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
#if defined(__ARM_FEATURE_MATMUL_INT8)
    assert((nrc == 2) || (nrc == 1));
#else
    assert(nrc == 1);
#endif
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_FEATURE_MATMUL_INT8)
    if (nrc == 2) {
        const block_q4_0 * restrict vx0 = vx;
        const block_q4_0 * restrict vx1 = vx + bx;
        const block_q8_0 * restrict vy0 = vy;
        const block_q8_0 * restrict vy1 = vy + by;

        float32x4_t sumv0 = vdupq_n_f32(0.0f);

        for (int i = 0; i < nb; i++) {
            const block_q4_0 * restrict b_x0 = &vx0[i];
            const block_q4_0 * restrict b_x1 = &vx1[i];
            const block_q8_0 * restrict b_y0 = &vy0[i];
            const block_q8_0 * restrict b_y1 = &vy1[i];

            const uint8x16_t m4b = vdupq_n_u8(0x0F);
            const int8x16_t  s8b = vdupq_n_s8(0x8);

            const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
            const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);

            // 4-bit -> 8-bit
            const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
            const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
            const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
            const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

            // sub 8
            const int8x16_t x0_l = vsubq_s8(v0_0l, s8b);
            const int8x16_t x0_h = vsubq_s8(v0_0h, s8b);
            const int8x16_t x1_l = vsubq_s8(v0_1l, s8b);
            const int8x16_t x1_h = vsubq_s8(v0_1h, s8b);

            // load y
            const int8x16_t y0_l = vld1q_s8(b_y0->qs);
            const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
            const int8x16_t y1_l = vld1q_s8(b_y1->qs);
            const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);

            float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
                                 GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
                                 GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
                                 GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};

            int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
            int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
            int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
            int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));

            int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
            int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
            int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
            int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));

            sumv0 = vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
                              l1, r1)), l2, r2)), l3, r3))), scale);
        }

        float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
        float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);

        vst1_f32(s,      vget_low_f32 (sumv2));
        vst1_f32(s + bs, vget_high_f32(sumv2));
        return;
    }
#endif
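    // Note: the nrc == 2 path above zips the 64-bit halves of the two x rows and the two
    // y rows so that each vmmlaq_s32 accumulates a full 2x2 tile of int8 dot products,
    // which is then scaled by the four per-block scale products in a single vmlaq_f32.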

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    assert(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q4_0 * restrict x0 = &x[i + 0];
        const block_q4_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);
        const int8x16_t  s8b = vdupq_n_s8(0x8);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // sub 8
        const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
        const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
        const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
        const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

        // dot product into int32x4_t
        const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
        const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        __m256i bx = bytes_from_nibbles_32(x[i].qs);

        // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
        const __m256i off = _mm256_set1_epi8( 8 );
        bx = _mm256_sub_epi8( bx, off );

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps( d, q, acc );
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        const __m128i lowMask = _mm_set1_epi8(0xF);
        const __m128i off = _mm_set1_epi8(8);

        const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);

        __m128i bx = _mm_and_si128(lowMask, tmp);
        __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
        bx = _mm_sub_epi8(bx, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx, by);

        bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
        by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
        bx = _mm_sub_epi8(bx, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx, by);

        // Convert int32_t to float
        __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));

        // Apply the scale, and accumulate
        acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__SSSE3__)
    // set constants
    const __m128i lowMask = _mm_set1_epi8(0xF);
    const __m128i off = _mm_set1_epi8(8);

    // Initialize accumulator with zeros
    __m128 acc_0 = _mm_setzero_ps();
    __m128 acc_1 = _mm_setzero_ps();
    __m128 acc_2 = _mm_setzero_ps();
    __m128 acc_3 = _mm_setzero_ps();

    // First round without accumulation
    {
        _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 0 and 1
        const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );

        const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);

        __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
        __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
        bx_0 = _mm_sub_epi8(bx_0, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);

        __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
        __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
        bx_1 = _mm_sub_epi8(bx_1, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);

        _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 2 and 3
        const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );

        const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);

        __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
        __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
        bx_2 = _mm_sub_epi8(bx_2, off);
        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);

        __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
        __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
        bx_3 = _mm_sub_epi8(bx_3, off);
        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);

        // Convert int32_t to float
        __m128 p0 = _mm_cvtepi32_ps(i32_0);
        __m128 p1 = _mm_cvtepi32_ps(i32_1);
        __m128 p2 = _mm_cvtepi32_ps(i32_2);
        __m128 p3 = _mm_cvtepi32_ps(i32_3);

        // Apply the scale
        acc_0 = _mm_mul_ps( d_0_1, p0 );
        acc_1 = _mm_mul_ps( d_0_1, p1 );
        acc_2 = _mm_mul_ps( d_2_3, p2 );
        acc_3 = _mm_mul_ps( d_2_3, p3 );
    }

    assert(nb % 2 == 0); // TODO: handle odd nb

    // Main loop
    for (int i = 2; i < nb; i+=2) {
        _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 0 and 1
        const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);

        __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
        __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
        bx_0 = _mm_sub_epi8(bx_0, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);

        __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
        __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
        bx_1 = _mm_sub_epi8(bx_1, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);

        _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 2 and 3
        const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );

        const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);

        __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
        __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
        bx_2 = _mm_sub_epi8(bx_2, off);
        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);

        __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
        __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
        bx_3 = _mm_sub_epi8(bx_3, off);
        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);

        // Convert int32_t to float
        __m128 p0 = _mm_cvtepi32_ps(i32_0);
        __m128 p1 = _mm_cvtepi32_ps(i32_1);
        __m128 p2 = _mm_cvtepi32_ps(i32_2);
        __m128 p3 = _mm_cvtepi32_ps(i32_3);

        // Apply the scale
        __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
        __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
        __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
        __m128 p3_d = _mm_mul_ps( d_2_3, p3 );

        // Accumulate
        acc_0 = _mm_add_ps(p0_d, acc_0);
        acc_1 = _mm_add_ps(p1_d, acc_1);
        acc_2 = _mm_add_ps(p2_d, acc_2);
        acc_3 = _mm_add_ps(p3_d, acc_3);
    }

    *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
        // load elements
        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

        // mask and store lower part of x, and then upper part
        vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
        vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

        vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
        vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

        // subtract offset
        vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
        vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);

        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F) - 8;
            const int v1 = (x[i].qs[j] >>   4) - 8;

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
    }

    *s = sumf;
#endif
}

void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
#if defined(__ARM_FEATURE_MATMUL_INT8)
    assert((nrc == 2) || (nrc == 1));
#else
    assert(nrc == 1);
#endif
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

#if defined(__ARM_FEATURE_MATMUL_INT8)
    if (nrc == 2) {
        const block_q4_1 * restrict vx0 = vx;
        const block_q4_1 * restrict vx1 = vx + bx;
        const block_q8_1 * restrict vy0 = vy;
        const block_q8_1 * restrict vy1 = vy + by;

        float32x4_t sumv0  = vdupq_n_f32(0.0f);
        float32x4_t summs0 = vdupq_n_f32(0.0f);

        for (int i = 0; i < nb; i++) {
            const block_q4_1 * restrict b_x0 = &vx0[i];
            const block_q4_1 * restrict b_x1 = &vx1[i];
            const block_q8_1 * restrict b_y0 = &vy0[i];
            const block_q8_1 * restrict b_y1 = &vy1[i];

            float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * b_y0->s,
                                   GGML_FP16_TO_FP32(b_x1->m) * b_y0->s,
                                   GGML_FP16_TO_FP32(b_x0->m) * b_y1->s,
                                   GGML_FP16_TO_FP32(b_x1->m) * b_y1->s};
            summs0 += summs_t;

            const uint8x16_t m4b = vdupq_n_u8(0x0F);

            const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
            const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);

            // 4-bit -> 8-bit
            const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
            const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
            const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
            const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

            // load y
            const int8x16_t y0_l = vld1q_s8(b_y0->qs);
            const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
            const int8x16_t y1_l = vld1q_s8(b_y1->qs);
            const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);

            // mmla into int32x4_t
            float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
                                 GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
                                 GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
                                 GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};

            int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
            int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
            int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
            int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));

            int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
            int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
            int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
            int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));

            sumv0 = vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
                              l1, r1)), l2, r2)), l3, r3))), scale);
        }

        float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
        float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
        sumv2 = sumv2 + summs0;

        vst1_f32(s,      vget_low_f32 (sumv2));
        vst1_f32(s + bs, vget_high_f32(sumv2));
        return;
    }
#endif

    // TODO: add WASM SIMD
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs = 0;

    assert(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q4_1 * restrict x0 = &x[i + 0];
        const block_q4_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i + 0];
        const block_q8_1 * restrict y1 = &y[i + 1];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

        // dot product into int32x4_t
        const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
        const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    // Main loop
    for (int i = 0; i < nb; ++i) {
        const float d0 = GGML_FP16_TO_FP32(x[i].d);
        const float d1 = y[i].d;

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        const __m256 d0v = _mm256_set1_ps( d0 );
        const __m256 d1v = _mm256_set1_ps( d1 );

        // Compute combined scales
        const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );

        // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
        const __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );

        const __m256 xy = mul_sum_us8_pairs_float(bx, by);

        // Accumulate d0*d1*x*y
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d0d1, xy, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
#endif
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
        // load elements
        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

        // mask and store lower part of x, and then upper part
        vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
        vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

        vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
        vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F);
            const int v1 = (x[i].qs[j] >>   4);

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
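
// q5_0: like q4_0, but each weight gets a 5th (high) bit from the qh bitfield, giving
// values in [-16, 15] after the offset; the high bits are expanded to bytes via the
// table_b2b_1 lookup on NEON/WASM, bytes_from_bits_32 on AVX, and explicit shifts elsewhere.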
void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(qk == QK5_0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    assert(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q5_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        // extract the 5th bit via lookup table ((!b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_1[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_1[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_1[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_1[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_1[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_1[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q8_0 * restrict y0 = &y[i];

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_1[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_1[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_1[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
        const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
                        wasm_i32x4_add(
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
        bx = _mm256_or_si256(bx, bxhi);

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps(d, q, acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8((char)0xF0);

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_andnot_si128(bxhil, mask);
        bxhih = _mm_andnot_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    uint32_t qh;

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    // These temporary registers are for masking and shift operations
    vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
    vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);

    vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
    vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);

    for (int i = 0; i < nb; i++) {
        memcpy(&qh, x[i].qh, sizeof(uint32_t));

        // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
        vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
        vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
        vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);

        // ((qh & (1u << (j + 16))) >> (j + 12));
        vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
        vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);

        // narrowing
        vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
        vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);

        vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
        vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);

        // load
        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

        vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
        vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

        vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
        vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);

        vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
        vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

        vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
        vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);

        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
            const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>  4) | xh_1) - 16;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }

    *s = sumf;
#endif
}
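
// q5_1: unsigned 5-bit quants plus a per-block minimum m; since y[i].s holds the sum of
// the block's dequantized y values, the m * y-sum correction is accumulated separately
// (summs) and only the unsigned dot product is needed per block.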
void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(qk == QK5_1);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs0 = 0.0f;
    float summs1 = 0.0f;

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    assert(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q5_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i];
        const block_q8_1 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
        summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;

        // extract the 5th bit via lookup table ((b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_0[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_0[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_0[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_0[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_0[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_0[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit
        const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    float summs = 0.0f;

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q8_1 * restrict y0 = &y[i];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s;

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_0[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_0[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_0[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit
        const v128_t v0lf = wasm_v128_or(v0l, qhl);
        const v128_t v0hf = wasm_v128_or(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv,
                wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                       wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                       wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
        bx = _mm256_or_si256(bx, bxhi);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8(0x10);

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_and_si128(bxhil, mask);
        bxhih = _mm_and_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    uint32_t qh;

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    // temporary registers for shift operations
    vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
    vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);

    for (int i = 0; i < nb; i++) {
        memcpy(&qh, x[i].qh, sizeof(uint32_t));

        // load qh
        vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);

        // ((qh >> (j +  0)) << 4) & 0x10;
        vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
        vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
        vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);

        // ((qh >> (j + 12))     ) & 0x10;
        vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
        vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);

        // narrowing
        vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
        vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);

        vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
        vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);

        // load
        vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);

        vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
        vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);

        vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
        vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);

        vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
        vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);

        vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
        vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);

        vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
        vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
            const int32_t x1 = (x[i].qs[j] >>  4) | xh_1;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
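
// q8_0 x q8_0: both operands are already 8-bit, so each block is a plain int8 dot
// product scaled by the product of the two block scales.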
void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
#if defined(__ARM_FEATURE_MATMUL_INT8)
    assert((nrc == 2) || (nrc == 1));
#else
    assert(nrc == 1);
#endif
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q8_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_FEATURE_MATMUL_INT8)
    if (nrc == 2) {
        const block_q8_0 * restrict vx0 = vx;
        const block_q8_0 * restrict vx1 = vx + bx;
        const block_q8_0 * restrict vy0 = vy;
        const block_q8_0 * restrict vy1 = vy + by;

        float32x4_t sumv0 = vdupq_n_f32(0.0f);

        for (int i = 0; i < nb; i++) {
            const block_q8_0 * restrict b_x0 = &vx0[i];
            const block_q8_0 * restrict b_y0 = &vy0[i];

            const block_q8_0 * restrict b_x1 = &vx1[i];
            const block_q8_0 * restrict b_y1 = &vy1[i];

            const int8x16_t x0_l = vld1q_s8(b_x0->qs);
            const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
            const int8x16_t x1_l = vld1q_s8(b_x1->qs);
            const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);

            // load y
            const int8x16_t y0_l = vld1q_s8(b_y0->qs);
            const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
            const int8x16_t y1_l = vld1q_s8(b_y1->qs);
            const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);

            float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
                                 GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
                                 GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
                                 GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};

            int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
            int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
            int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
            int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));

            int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
            int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
            int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
            int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));

            sumv0 = vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
                              l1, r1)), l2, r2)), l3, r3))), scale);
        }

        float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
        float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);

        vst1_f32(s,      vget_low_f32 (sumv2));
        vst1_f32(s + bs, vget_high_f32(sumv2));
        return;
    }
#endif

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    assert(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q8_0 * restrict x0 = &x[i + 0];
        const block_q8_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const int8x16_t x0_0 = vld1q_s8(x0->qs);
        const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
        const int8x16_t x1_0 = vld1q_s8(x1->qs);
        const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);

        // load y
        const int8x16_t y0_0 = vld1q_s8(y0->qs);
        const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
        const int8x16_t y1_0 = vld1q_s8(y1->qs);
        const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
                        ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
                        ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
        __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        // Multiply q with scale and accumulate
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d, q, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
#endif
    }

    *s = hsum_float_8(acc);
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;
    size_t vl = __riscv_vsetvl_e8m1(qk);

    for (int i = 0; i < nb; i++) {
        // load elements
        vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
        vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);

        vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);

        vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
        vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk; j++) {
            sumi += x[i].qs[j]*y[i].qs[j];
        }

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#endif
}
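
// K-quant dot products: the implementations below are compiled for the QK_K == 256
// super-block layout.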
  4171. #if QK_K == 256
  4172. void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4173. assert(nrc == 1);
  4174. UNUSED(nrc);
  4175. UNUSED(bx);
  4176. UNUSED(by);
  4177. UNUSED(bs);
  4178. const block_q2_K * restrict x = vx;
  4179. const block_q8_K * restrict y = vy;
  4180. const int nb = n / QK_K;
  4181. #ifdef __ARM_NEON
  4182. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4183. const uint8x16_t m4 = vdupq_n_u8(0xF);
  4184. const int32x4_t vzero = vdupq_n_s32(0);
    ggml_int8x16x2_t q2bytes;
    uint8_t aux[16];

    float sum = 0;

    for (int i = 0; i < nb; ++i) {
        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;
        const uint8_t * restrict sc = x[i].scales;

        const uint8x16_t mins_and_scales = vld1q_u8(sc);
        const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
        vst1q_u8(aux, scales);

        const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
        const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
        const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
        const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
                                       vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
        const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
                                       vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
        sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
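
        // block_q8_K precomputes the sum of each group of 16 quants in bsums[] at
        // quantization time. Since every q2_K sub-block subtracts the same min from
        // all 16 of its values, the whole correction collapses to
        // dmin * sum_j(min_j * bsum_j), accumulated above without touching the quants.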
        int isum = 0;
        int is = 0;

// We use this macro instead of a function call because for some reason
// the code runs 2-3% slower, even if the function is declared inline
#define MULTIPLY_ACCUM_WITH_SCALE(index)\
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];

#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
        q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
        q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
        q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
        MULTIPLY_ACCUM_WITH_SCALE((index));
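
        // Each iteration below consumes 128 quants: 32 bytes of q2 hold four 2-bit
        // values per byte, extracted at shifts 0/2/4/6 and matched against 4 x 32
        // bytes of q8.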
        for (int j = 0; j < QK_K/128; ++j) {
            const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;

            ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
            q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
            q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));

            MULTIPLY_ACCUM_WITH_SCALE(0);

            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);

            is += 8;
        }

        sum += d * isum;
    }

    *s = sum;
#elif defined __AVX2__

    const __m256i m3 = _mm256_set1_epi8(3);
    const __m128i m4 = _mm_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
        const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
        const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
        const __m256i mins = _mm256_cvtepi8_epi16(mins8);
        const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));

        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);

        const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
        const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
        const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
        const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};

        __m256i sumi = _mm256_setzero_si256();

        for (int j = 0; j < QK_K/128; ++j) {

            const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;

            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;

            const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
            const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
            const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
            const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);

            __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
            __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
            __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
            __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);

            p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
            p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
            p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
            p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);

            p0 = _mm256_add_epi32(p0, p1);
            p2 = _mm256_add_epi32(p2, p3);

            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
        }

        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc);
#elif defined __AVX__

    const __m128i m3 = _mm_set1_epi8(0x3);
    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(0x2);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        // load mins and scales from block_q2_K.scales[QK_K/16]
        const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
        const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
        const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
        const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
        const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));

        // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
        const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
        const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));

        // sumf += -dmin * summs in 32bits*8
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);

        const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
        const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
        const __m128i scales[2] = { scales_0, scales_1 };

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        for (int j = 0; j < QK_K/128; ++j) {

            // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;

            // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
            __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
            const __m128i q2_0 = _mm_and_si128(q2bits, m3);
            const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
            const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
            const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
            q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
            const __m128i q2_1 = _mm_and_si128(q2bits, m3);
            const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
            const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
            const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);

            // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
            __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
            __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
            __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
            __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
            __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
            __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
            __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
            __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);

            // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
            __m128i shuffle = _mm_set1_epi16(0x0100);
            p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
            shuffle = _mm_add_epi16(shuffle, m2);
            p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
            shuffle = _mm_add_epi16(shuffle, m2);
            p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
            shuffle = _mm_add_epi16(shuffle, m2);
            p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
            shuffle = _mm_add_epi16(shuffle, m2);
            p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
            shuffle = _mm_add_epi16(shuffle, m2);
            p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
            shuffle = _mm_add_epi16(shuffle, m2);
            p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
            shuffle = _mm_add_epi16(shuffle, m2);
            p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);

            p0 = _mm_add_epi32(p0, p1);
            p2 = _mm_add_epi32(p2, p3);
            p4 = _mm_add_epi32(p4, p5);
            p6 = _mm_add_epi32(p6, p7);

            // isum in 32bits*4*2
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
        }

        // sumf += dall * isum - dmin * summs in 32bits
        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc);
#elif defined __riscv_v_intrinsic

    float sumf = 0;
    uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
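
    // temp_01 is a gather-index pattern: vrgather with (temp_01 + k) broadcasts scale
    // byte k to the first 16 lanes and scale byte k+1 to the next 16, pairing each
    // 32-quant chunk with its two sub-block scales.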
    for (int i = 0; i < nb; ++i) {

        const uint8_t * q2 = x[i].qs;
        const  int8_t * q8 = y[i].qs;
        const uint8_t * sc = x[i].scales;

        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        size_t vl = 16;

        vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
        vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);

        vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);

        vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
        vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
        vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
        vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
        vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);

        sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);

        vl = 32;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
        vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);

        uint8_t is = 0;
        int isum = 0;

        for (int j = 0; j < QK_K/128; ++j) {
            // load Q2
            vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);

            vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
            vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl);
            vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl);
            vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl);

            // duplicate scale elements for product
            vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
            vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
            vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
            vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);

            vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
            vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
            vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
            vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));

            // load Q8
            vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
            vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
            vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
            vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);

            vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
            vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
            vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
            vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);

            vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
            vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);

            isum += __riscv_vmv_x_s_i32m1_i32(isum1);

            q2 += 32; q8 += 128; is = 8;
        }

        sumf += dall * isum;
    }

    *s = sumf;
#else

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * q2 = x[i].qs;
        const  int8_t * q8 = y[i].qs;
        const uint8_t * sc = x[i].scales;

        int summs = 0;
        for (int j = 0; j < 16; ++j) {
            summs += y[i].bsums[j] * (sc[j] >> 4);
        }

        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        int isum = 0;
        int is = 0;
        int d;
        for (int k = 0; k < QK_K/128; ++k) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {
                d = sc[is++] & 0xF;
                int isuml = 0;
                for (int l =  0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
                isum += d * isuml;
                d = sc[is++] & 0xF;
                isuml = 0;
                for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
                isum += d * isuml;
                shift += 2;
                q8 += 32;
            }
            q2 += 32;
        }
        sumf += dall * isum - dmin * summs;
    }
    *s = sumf;
#endif
}
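
// Summary of what every branch above computes, per q2_K block:
//
//     sumf += dall * sum_j(scale_j * sum_{l in sub-block j} q8[l] * q2[l])
//           - dmin * sum_j(min_j * bsum_j)
//
// with scale_j = x.scales[j] & 0xF and min_j = x.scales[j] >> 4 for each of the
// QK_K/16 sub-blocks; the scalar branch directly above is the reference form.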
#else
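
// Presumably this branch corresponds to the small-super-block build (QK_K == 64):
// each q2_K block then holds just four 16-quant sub-blocks, so the packed
// scales/mins fit in four bytes and the whole block is processed in one pass below.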

void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q2_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    const uint8x16_t m3 = vdupq_n_u8(0x3);

    const int32x4_t vzero = vdupq_n_s32(0);

    ggml_int8x16x4_t q2bytes;

    uint32_t aux32[2];
    const uint8_t * scales = (const uint8_t *)aux32;

    float sum = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * (float)x[i].d;
        const float dmin = -y[i].d * (float)x[i].dmin;

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;
        const uint32_t * restrict sc = (const uint32_t *)x[i].scales;

        aux32[0] = sc[0] & 0x0f0f0f0f;
        aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
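
        // The four sub-block scales and mins are nibble-packed into a single uint32:
        // the two masked copies above leave scales[0..3] holding the low nibbles
        // (scales) and scales[4..7] the high nibbles (mins).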
        sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);

        int isum1 = 0, isum2 = 0;

        const uint8x16_t q2bits = vld1q_u8(q2);

        const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);

        q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
        q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
        q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
        q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));

        isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
        isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
        isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
        isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];

        sum += d * (isum1 + isum2);
    }

    *s = sum;
#elif defined __AVX2__

    const __m256i m3 = _mm256_set1_epi8(3);

    __m256 acc = _mm256_setzero_ps();

    uint32_t ud, um;
    const uint8_t * restrict db = (const uint8_t *)&ud;
    const uint8_t * restrict mb = (const uint8_t *)&um;

    float summs = 0;

    // TODO: optimize this
    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
        ud = (sc[0] >> 0) & 0x0f0f0f0f;
        um = (sc[0] >> 4) & 0x0f0f0f0f;

        int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
        summs += dmin * smin;

        const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
        const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
        const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
        const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);

        const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
        const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
        const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
        const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));

        acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
        acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
        acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
        acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined __AVX__

    const __m128i m3 = _mm_set1_epi8(3);

    __m256 acc = _mm256_setzero_ps();

    uint32_t ud, um;
    const uint8_t * restrict db = (const uint8_t *)&ud;
    const uint8_t * restrict mb = (const uint8_t *)&um;

    float summs = 0;

    // TODO: optimize this
    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
        ud = (sc[0] >> 0) & 0x0f0f0f0f;
        um = (sc[0] >> 4) & 0x0f0f0f0f;

        int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
        summs += dmin * smin;

        const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
        const __m128i q2_0 = _mm_and_si128(q2bits, m3);
        const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
        const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
        const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
        const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
        const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
        const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));

        const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
        const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
        const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
        const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));

        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined __riscv_v_intrinsic

    uint32_t aux32[2];
    const uint8_t * scales = (const uint8_t *)aux32;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * (float)x[i].d;
        const float dmin = -y[i].d * (float)x[i].dmin;

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;
        const uint32_t * restrict sc = (const uint32_t *)x[i].scales;

        aux32[0] = sc[0] & 0x0f0f0f0f;
        aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;

        sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);

        int isum1 = 0;
        int isum2 = 0;

        size_t vl = 16;

        vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

        // load Q2
        vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);

        vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
        vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03, vl));
        vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03, vl));
        vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03, vl));

        // load Q8, and take product with Q2
        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

        vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
        vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
        vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
        vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);

        isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
        isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
        isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
        isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];

        sumf += d * (isum1 + isum2);
    }

    *s = sumf;
#else

    float sumf = 0;

    int isum[4];

    for (int i = 0; i < nb; ++i) {

        const uint8_t * q2 = x[i].qs;
        const  int8_t * q8 = y[i].qs;
        const uint8_t * sc = x[i].scales;

        int summs = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            summs += y[i].bsums[j] * (sc[j] >> 4);
        }

        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        isum[0] = isum[1] = isum[2] = isum[3] = 0;
        for (int l = 0; l < 16; ++l) {
            isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
            isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
            isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
            isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
        }
        for (int l = 0; l < 4; ++l) {
            isum[l] *= (sc[l] & 0xF);
        }
        sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
    }
    *s = sumf;
#endif
}
#endif

#if QK_K == 256
void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const uint32_t kmask1 = 0x03030303;
    const uint32_t kmask2 = 0x0f0f0f0f;

    const block_q3_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;
#ifdef __ARM_NEON

    uint32_t aux[3];
    uint32_t utmp[4];

    const uint8x16_t m3b = vdupq_n_u8(0x3);
    const int32x4_t  vzero = vdupq_n_s32(0);

    const uint8x16_t m0 = vdupq_n_u8(1);
    const uint8x16_t m1 = vshlq_n_u8(m0, 1);
    const uint8x16_t m2 = vshlq_n_u8(m0, 2);
    const uint8x16_t m3 = vshlq_n_u8(m0, 3);
    const int8_t m32 = 32;

    ggml_int8x16x4_t q3bytes;

    float sum = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict qh = x[i].hmask;
        const int8_t  * restrict q8 = y[i].qs;

        ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);

        ggml_uint8x16x4_t q3h;

        int32_t isum = 0;

        // Set up scales
        memcpy(aux, x[i].scales, 12);
        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);

        int8_t * scale = (int8_t *)utmp;
        for (int j = 0; j < 16; ++j) scale[j] -= m32;
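
        // q3_K packs 16 six-bit scales into 12 bytes: the low 4 bits of each scale
        // live in the first 8 bytes (two per byte), the high 2 bits in the last 4
        // (four per byte); kmask1/kmask2 pick them apart. Subtracting 32 recenters
        // the unsigned 6-bit values into the signed range [-32, 31].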
        for (int j = 0; j < QK_K/128; ++j) {

            const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
            const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
            const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;

            q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
            q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
            q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
            q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);

            q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
            q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
            q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
            q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));

            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];

            scale += 4;

            q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
            q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
            q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
            q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);

            q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
            q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
            q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
            q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));

            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];

            scale += 4;

            if (j == 0) {
                qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
                qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
            }
        }
        sum += d * isum;
    }

    *s = sum;
#elif defined __AVX2__

    const __m256i m3 = _mm256_set1_epi8(3);
    const __m256i mone = _mm256_set1_epi8(1);
    const __m128i m32 = _mm_set1_epi8(32);

    __m256 acc = _mm256_setzero_ps();

    uint32_t aux[3];

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q3 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        // Set up scales
        memcpy(aux, x[i].scales, 12);
        __m128i scales128 = _mm_set_epi32(
                ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
                ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
                (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
                (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
        scales128 = _mm_sub_epi8(scales128, m32);
        const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
        const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
        const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
        const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};

        // high bit
        const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);

        // integer accumulator
        __m256i sumi = _mm256_setzero_si256();

        int bit = 0;
        int is  = 0;

        for (int j = 0; j < QK_K/128; ++j) {
            // load low 2 bits
            const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;

            // prepare low and high bits
            const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
            const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
            ++bit;

            const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
            const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
            ++bit;

            const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
            const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
            ++bit;

            const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
            const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
            ++bit;

            // load Q8 quants
            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
            // and then subtract. The high bit part already carries the offset that must be subtracted: q3h is 4 if the
            // high bit was not set and 0 if it was set, matching q3 = (low 2 bits) - (high bit set ? 0 : 4).
            __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
            __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
            __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
            __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);

            __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
            __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
            __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
            __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);

            p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm256_sub_epi16(p16_3, q8s_3);

            // multiply with scales
            p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
            p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
            p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
            p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);

            // accumulate
            p16_0 = _mm256_add_epi32(p16_0, p16_1);
            p16_2 = _mm256_add_epi32(p16_2, p16_3);
            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
        }

        // multiply with block scale and accumulate
        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc);
#elif defined __AVX__

    const __m128i m3 = _mm_set1_epi8(3);
    const __m128i mone = _mm_set1_epi8(1);
    const __m128i m32 = _mm_set1_epi8(32);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    const uint32_t *aux;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q3 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        // Set up scales
        aux = (const uint32_t *)x[i].scales;
        __m128i scales128 = _mm_set_epi32(
                ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
                ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
                (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
                (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
        scales128 = _mm_sub_epi8(scales128, m32);
        const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
        const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
        const __m128i scales[2] = { scales_0, scales_1 };

        // high bit *128*2 from block_q3_K.hmask[QK_K/8]
        const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
        const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);

        // integer accumulator
        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        for (int j = 0; j < QK_K/128; ++j) {
            // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
            const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
            const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;

            // prepare low and high bits
            const int bit = j << 2;

            const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
            const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
            const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
            const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);

            const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
            const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
            const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
            const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);

            const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
            const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
            const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
            const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);

            const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
            const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
            const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
            const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);

            // load Q8 quants from block_q8_K.qs[QK_K]
            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
            // and then subtract. The high bit part already carries the offset that must be subtracted: q3h is 4 if the
            // high bit was not set and 0 if it was set, matching q3 = (low 2 bits) - (high bit set ? 0 : 4).
            __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
            __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
            __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
            __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
            __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
            __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
            __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
            __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);

            __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
            __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
            __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
            __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
            __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
            __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
            __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);

            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
            p16_7 = _mm_sub_epi16(p16_7, q8s_7);

            // multiply with scales
            __m128i shuffle = _mm_set1_epi16(0x0100);
            p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);

            // accumulate
            p16_0 = _mm_add_epi32(p16_0, p16_1);
            p16_2 = _mm_add_epi32(p16_2, p16_3);
            p16_4 = _mm_add_epi32(p16_4, p16_5);
            p16_6 = _mm_add_epi32(p16_6, p16_7);
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
        }

        // multiply with block scale and accumulate
        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc);
#elif defined __riscv_v_intrinsic

    uint32_t aux[3];
    uint32_t utmp[4];

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict qh = x[i].hmask;
        const  int8_t * restrict q8 = y[i].qs;

        memcpy(aux, x[i].scales, 12);
        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);

        int8_t * scale = (int8_t *)utmp;
        for (int j = 0; j < 16; ++j) scale[j] -= 32;

        size_t vl = 32;
        uint8_t m = 1;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
        vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);

        int sum_t = 0;

        for (int j = 0; j < QK_K; j += 128) {

            vl = 32;

            // load Q3
            vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);

            vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
            vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03, vl));
            vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03, vl));
            vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03, vl));

            // compute mask for subtraction
            vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
            vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
            m <<= 1;

            vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
            vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
            m <<= 1;

            vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
            vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
            m <<= 1;

            vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
            vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
            m <<= 1;

            // load Q8 and take product with Q3
            vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
            vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
            vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
            vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);

            vl = 16;

            // retrieve lane to multiply with scale
            vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
            vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
            vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
            vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
            vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
            vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
            vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
            vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);

            vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
            vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
            vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
            vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);

            sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);

            q3 += 32; q8 += 128; scale += 8;
        }

        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;

        sumf += d*sum_t;
    }

    *s = sumf;
#else
    // scalar version
    // This function is written like this so the compiler can manage to vectorize most of it
    // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
    // manually vectorized version above. Every other version I tried would run at least 4 times slower.
    // The ideal situation would be if we could just write the code once, and the compiler would
    // automatically produce the best possible set of machine instructions, instead of us having to manually
    // write vectorized versions for AVX, ARM_NEON, etc.

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    uint32_t auxs[4];
    const int8_t * scales = (const int8_t*)auxs;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict hm = x[i].hmask;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        uint8_t m = 1;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            q3 += 32;
        }
        a = aux8;

        memcpy(auxs, x[i].scales, 12);
        uint32_t tmp = auxs[2];
        auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
        auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
        auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
        auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
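
// For reference, q3_K decoding as used by all branches above: each quant is
// q = ((qs >> shift) & 3) - (hmask bit set ? 0 : 4), giving the signed range
// [-4, 3], and each group of 16 quants is weighted by a 6-bit scale recentered
// to [-32, 31]; the block total is then multiplied by
// d = GGML_FP16_TO_FP32(x.d) * y.d.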
#else

void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q3_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    const int32x4_t vzero = vdupq_n_s32(0);

    const uint8x16_t m3b = vdupq_n_u8(0x3);
    const uint8x16_t mh  = vdupq_n_u8(4);

    ggml_int8x16x4_t q3bytes;

    uint16_t aux16[2];
    int8_t * scales = (int8_t *)aux16;

    float sum = 0;

    for (int i = 0; i < nb; ++i) {

        ggml_uint8x16x4_t q3h;

        const uint8x8_t  hbits  = vld1_u8(x[i].hmask);
        const uint8x16_t q3bits = vld1q_u8(x[i].qs);
        const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);

        const uint16_t a = *(const uint16_t *)x[i].scales;
        aux16[0] = a & 0x0f0f;
        aux16[1] = (a >> 4) & 0x0f0f;

        for (int j = 0; j < 4; ++j) scales[j] -= 8;

        int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);

        const float d = y[i].d * (float)x[i].d;

        const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
        q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
        q3h.val[1] = vandq_u8(mh, htmp);
        q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
        q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));

        q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b),                q3h.val[0]));
        q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
        q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
        q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6),                q3h.val[3]));

        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];

        sum += d * isum;
    }

    *s = sum;
#elif defined __AVX2__

    const __m256i m3 = _mm256_set1_epi8(3);
    const __m256i m1 = _mm256_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    uint64_t aux64;

    uint16_t aux16[2];
    const int8_t * aux8 = (const int8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q3 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const uint16_t a = *(const uint16_t *)x[i].scales;
        aux16[0] = a & 0x0f0f;
        aux16[1] = (a >> 4) & 0x0f0f;

        const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
        const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));

        memcpy(&aux64, x[i].hmask, 8);

        const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
        __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
        __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
        q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
        q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);

        // load low 2 bits
        const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);

        // prepare low and high bits
        const __m256i q3aux  = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
        const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
        const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);

        // load Q8 quants
        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
        // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
        // and then subtract. The high bit part already carries the offset that must be subtracted: q3h is 4 if the
        // high bit was not set and 0 if it was set, matching q3 = (low 2 bits) - (high bit set ? 0 : 4).
        const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
        const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);

        __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
        __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);

        p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
        p16_1 = _mm256_sub_epi16(p16_1, q8s_1);

        // multiply with scales
        p16_0 = _mm256_madd_epi16(scale_0, p16_0);
        p16_1 = _mm256_madd_epi16(scale_1, p16_1);

        p16_0 = _mm256_add_epi32(p16_0, p16_1);

        // multiply with block scale and accumulate
        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
    }

    *s = hsum_float_8(acc);
#elif defined __AVX__

    const __m128i m3 = _mm_set1_epi8(3);
    const __m128i m1 = _mm_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    uint64_t aux64;

    uint16_t aux16[2];
    const int8_t * aux8 = (const int8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q3 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const uint16_t a = *(const uint16_t *)x[i].scales;
        aux16[0] = a & 0x0f0f;
        aux16[1] = (a >> 4) & 0x0f0f;

        const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
        const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
        const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
        const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);

        memcpy(&aux64, x[i].hmask, 8);

        __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
        __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
        __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
        __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
        q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
        q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
        q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
        q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);

        // load low 2 bits
        const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);

        // prepare low and high bits
        const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
        const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
        const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
        const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);

        // load Q8 quants
        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
        // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
        // and then subtract. The high bit part already carries the offset that must be subtracted: q3h is 4 if the
        // high bit was not set and 0 if it was set, matching q3 = (low 2 bits) - (high bit set ? 0 : 4).
        const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
        const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
        const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
        const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));

        __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
        __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
        __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
        __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));

        p16_0 = _mm_sub_epi16(p16_0, q8s_0);
        p16_1 = _mm_sub_epi16(p16_1, q8s_1);
        p16_2 = _mm_sub_epi16(p16_2, q8s_2);
        p16_3 = _mm_sub_epi16(p16_3, q8s_3);

        // multiply with scales
        p16_0 = _mm_madd_epi16(scale_0, p16_0);
        p16_1 = _mm_madd_epi16(scale_1, p16_1);
        p16_2 = _mm_madd_epi16(scale_2, p16_2);
        p16_3 = _mm_madd_epi16(scale_3, p16_3);

        p16_0 = _mm_add_epi32(p16_0, p16_2);
        p16_1 = _mm_add_epi32(p16_1, p16_3);
        __m256i p16 = MM256_SET_M128I(p16_1, p16_0);

        // multiply with block scale and accumulate
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

    uint16_t aux16[2];
    int8_t * scales = (int8_t *)aux16;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q3 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const uint16_t a = *(const uint16_t *)x[i].scales;
        aux16[0] = a & 0x0f0f;
        aux16[1] = (a >> 4) & 0x0f0f;

        for (int j = 0; j < 4; ++j) scales[j] -= 8;

        int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
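        // The vector loop below multiplies the *unsigned* 3-bit quants (0..7) with Q8; the true quants
        // are those values minus 4, so the constant part, -4 * sum(q8) per 16-value group, is folded in
        // up front here via the precomputed bsums (the scale order matches the nibble packing above).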

        const float d = y[i].d * (float)x[i].d;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        // load qh
        vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
        vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));

        size_t vl = 16;

        // extend and combine both qh_x1 and qh_x2
        vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);

        vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
        vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
        vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
        vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);

        // load Q3
        vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);

        vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
        vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
        vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
        vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);

        vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
        vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
        vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
        vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);

        // load Q8 and take product with Q3
        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

        vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
        vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
        vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
        vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

        isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];

        sumf += d * isum;
    }

    *s = sumf;

#else

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    int32_t scales[4];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict hm = x[i].hmask;
        const  int8_t * restrict q8 = y[i].qs;
        int8_t * restrict a = aux8;
        for (int l = 0; l < 8; ++l) {
            a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
            a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
            a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
            a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
            a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
            a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
            a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
            a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
        }
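        // Each of the 8 hmask bytes carries one high bit per 8-value group: bit k of hm[l] belongs
        // to quant l of group k, and a clear bit means the stored 2-bit value is offset by -4.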
        scales[0] = (x[i].scales[0] & 0xF) - 8;
        scales[1] = (x[i].scales[0] >>  4) - 8;
        scales[2] = (x[i].scales[1] & 0xF) - 8;
        scales[3] = (x[i].scales[1] >>  4) - 8;

        memset(aux32, 0, 8*sizeof(int32_t));
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;

#endif

}
#endif
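
// ggml_vec_dot_q4_K_q8_K: dot product of a q4_K row with a q8_K row.
// For QK_K == 256, each q4_K super-block stores 8 sub-blocks of 32 4-bit quants together with
// 6-bit sub-block scales and mins and two fp16 super-block factors d and dmin. A scalar sketch
// of what every SIMD branch below computes (names are illustrative, not part of the code):
//
//     for each sub-block j:  sumf += d * scale[j] * sum_k(q4[k]*q8[k]) - dmin * min[j] * sum_k(q8[k])
//
// where the second term is obtained cheaply from the precomputed per-16 q8 sums (bsums).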

#if QK_K == 256
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];
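
    // kmask1..kmask3 unpack the 12-byte scales field: sub-blocks 0..3 keep 6-bit scales in the low
    // bits of bytes 0..3 and 6-bit mins in the low bits of bytes 4..7; sub-blocks 4..7 store their
    // low 4 bits in bytes 8..11 and their top 2 bits in the spare high bits of bytes 0..7.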

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);
    const int32x4_t mzero = vdupq_n_s32(0);

    ggml_int8x16x2_t q4bytes;
    ggml_int8x16x2_t q8bytes;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));

        memcpy(utmp, x[i].scales, 12);

        uint32x2_t mins8 = { 0 };
        mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
        mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);

        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[0] &= kmask1;

        const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
        const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
                                         vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
        sumf -= dmin * vaddvq_s32(prod);

        const uint8_t * scales = (const uint8_t *)utmp;

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        int32_t sumi1 = 0;
        int32_t sumi2 = 0;

        for (int j = 0; j < QK_K/64; ++j) {
            const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;

            q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
            q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b));
            q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b));

            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
            sumi1 += vaddvq_s32(p1) * scales[2*j+0];

            q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
            q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
            q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));

            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
            sumi2 += vaddvq_s32(p2) * scales[2*j+1];
        }

        sumf += d * (sumi1 + sumi2);
    }

    *s = sumf;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();
    __m128 acc_m = _mm_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));

        const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
        const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
        const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
        acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);

        const __m128i sc128  = _mm256_extracti128_si256(mins_and_scales, 0);
        const __m256i scales = MM256_SET_M128I(sc128, sc128);

        __m256i sumi = _mm256_setzero_si256();

        for (int j = 0; j < QK_K/64; ++j) {

            const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
            const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));

            const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
            const __m256i q4l = _mm256_and_si256(q4bits, m4);
            const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);

            const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
            p16l = _mm256_madd_epi16(scale_l, p16l);

            const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
            p16h = _mm256_madd_epi16(scale_h, p16h);
            const __m256i sumj = _mm256_add_epi32(p16l, p16h);

            sumi = _mm256_add_epi32(sumi, sumj);
        }

        __m256 vd = _mm256_set1_ps(d);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
    }
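
    // acc_m holds four partial -dmin*min*bsum terms; the two shuffles below fold them into lane 0
    // (movehl adds the high pair onto the low pair, then movehdup adds lane 1 onto lane 0).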
    acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
    acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));

    *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(0x2);

    __m256 acc = _mm256_setzero_ps();
    __m128 acc_m = _mm_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
        const __m128i scales = _mm_cvtepu8_epi16(utmps);
        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));

        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
        const __m128i prod = _mm_madd_epi16(mins, q8s);
        acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        __m128i shuffle = _mm_set1_epi16(0x0100);
        for (int j = 0; j < QK_K/64; ++j) {

            const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);
            const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);

            __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
            const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
            q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
            const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);

            const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
            p16l = _mm_madd_epi16(scale_l, p16l);
            sumi_0 = _mm_add_epi32(sumi_0, p16l);
            const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
            p16l = _mm_madd_epi16(scale_l, p16l);
            sumi_1 = _mm_add_epi32(sumi_1, p16l);

            const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
            p16h = _mm_madd_epi16(scale_h, p16h);
            sumi_0 = _mm_add_epi32(sumi_0, p16h);
            const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
            p16h = _mm_madd_epi16(scale_h, p16h);
            sumi_1 = _mm_add_epi32(sumi_1, p16h);
        }

        __m256 vd = _mm256_set1_ps(d);
        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
    }

    acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
    acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));

    *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#elif defined __riscv_v_intrinsic

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        size_t vl = 8;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
        vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
        vint16mf2_t q8sums   = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        vuint8mf4_t mins8  = __riscv_vle8_v_u8mf4(mins, vl);
        vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
        vint32m1_t  prod   = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);

        vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
        sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        vl = 32;

        int32_t sum_1 = 0;
        int32_t sum_2 = 0;

        vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

        for (int j = 0; j < QK_K/64; ++j) {
            // load Q4
            vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);

            // load Q8 and multiply it with lower Q4 nibble
            vint8m1_t  q8_0 = __riscv_vle8_v_i8m1(q8, vl);
            vint8m1_t  q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
            vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
            vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);

            sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];

            // load Q8 and multiply it with upper Q4 nibble
            vint8m1_t  q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
            vint8m1_t  q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
            vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
            vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);

            sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];

            q4 += 32;    q8 += 64;
        }

        sumf += d*(sum_1 + sum_2);
    }

    *s = sumf;

#else

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int j = 0; j < QK_K/64; ++j) {
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
            a += 32;
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
            a += 32; q4 += 32;
        }
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;
        int sumi = 0;
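        // mins[j/2] pairs each 6-bit min with two of the 16-wide bsums (each min covers a 32-value
        // sub-block); the combined term is removed once at the end via sumf -= dmin * sumi.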
        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            int32_t scale = scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#else
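
// QK_K == 64 variant: here x[i].d is a two-element fp16 array, d[0] acting as the block scale and
// d[1] as the min factor, with the two 4-bit sub-block scales and two 4-bit mins packed into the
// two bytes of x[i].scales.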
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);

    const int32x4_t mzero = vdupq_n_s32(0);

    float sumf = 0;

    ggml_int8x16x2_t q4bytes;
    ggml_int8x16x4_t q8bytes;

    float sum_mins = 0.f;

    uint16_t aux16[2];
    const uint8_t * restrict scales = (const uint8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const uint16_t * restrict a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;

        const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
        sum_mins += y[i].d * (float)x[i].d[1] * summi;

        const float d = y[i].d * (float)x[i].d[0];

        const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);

        q8bytes = ggml_vld1q_s8_x4(q8);
        q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b));
        q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b));

        const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
        const int32_t sumi1 = vaddvq_s32(p1) * scales[0];

        q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
        q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));

        const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
        const int32_t sumi2 = vaddvq_s32(p2) * scales[1];

        sumf += d * (sumi1 + sumi2);
    }

    *s = sumf - sum_mins;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    uint16_t aux16[2];
    const uint8_t * scales = (const uint8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
        const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
        const __m256 vd = _mm256_set1_ps(d);

        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;

        summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
        const __m256i q4l = _mm256_and_si256(q4bits, m4);
        const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);

        const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
        const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);

        const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);

        const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
    }

    *s = hsum_float_8(acc) - summs;

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    uint16_t aux16[2];
    const uint8_t * scales = (const uint8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
        const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
        const __m256 vd = _mm256_set1_ps(d);

        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;

        summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
        const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
        const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
        const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
        const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
        const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
        const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
        const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
        const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
        const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));

        const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
        const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);

        const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
        const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
    }

    *s = hsum_float_8(acc) - summs;

#elif defined __riscv_v_intrinsic

    uint16_t s16[2];
    const uint8_t * restrict scales = (const uint8_t *)s16;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;

        const uint16_t * restrict b = (const uint16_t *)x[i].scales;
        s16[0] = b[0] & 0x0f0f;
        s16[1] = (b[0] >> 4) & 0x0f0f;

        sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);

        size_t vl = 32;

        vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

        // load Q4
        vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);

        // load Q8 and multiply it with lower Q4 nibble
        vint8m1_t  q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
        vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
        vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);

        sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);

        // load Q8 and multiply it with upper Q4 nibble
        vint8m1_t  q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
        vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
        vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);

        sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
    }

    *s = sumf;

#else

    uint8_t aux8[QK_K];
    int16_t aux16[16];
    float   sums [8];
    memset(sums, 0, 8*sizeof(float));

    uint16_t s16[2];
    const uint8_t * restrict scales = (const uint8_t *)s16;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;
        uint8_t * restrict a = aux8;
        for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
        for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;

        const uint16_t * restrict b = (const uint16_t *)x[i].scales;
        s16[0] = b[0] & 0x0f0f;
        s16[1] = (b[0] >> 4) & 0x0f0f;

        sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);

        for (int j = 0; j < QK_K/32; ++j) {
            for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
            q8 += 16; a += 16;
            for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
            q8 += 16; a += 16;
            const float dl = d * scales[j];
            for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
        }
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
#endif
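
// ggml_vec_dot_q5_K_q8_K: same structure as the q4_K kernel, but each quant gains a fifth bit
// stored separately in x[i].qh, one bit per value, which the branches below merge back in either
// by adding a 16 on top of the nibble or by conditionally subtracting 16 where the bit is clear.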

#if QK_K == 256
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);
    const uint8x16_t mone = vdupq_n_u8(1);
    const uint8x16_t mtwo = vdupq_n_u8(2);
    const int32x4_t mzero = vdupq_n_s32(0);

    ggml_int8x16x4_t q5bytes;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
        const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
        const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
                                         vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
        int32_t sumi_mins = vaddvq_s32(prod);

        const uint8_t * scales = (const uint8_t *)utmp;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);

        ggml_uint8x16x4_t q5h;

        int32_t sumi = 0;

        for (int j = 0; j < QK_K/64; ++j) {

            const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
            const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;

            q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
            q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
            q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
            q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
            qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
            qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
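            // bits 0 and 1 of each qh byte supply the fifth bit for the current two 32-value
            // sub-blocks (masked with mone/mtwo and shifted up to bit 4); the remaining qh bits
            // are rotated down two positions for the next loop iteration.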
            q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
            q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
            q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
            q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));

            sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
            sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
        }

        sumf += d * sumi - dmin * sumi_mins;
    }

    *s = sumf;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m128i mzero = _mm_setzero_si128();
    const __m256i mone  = _mm256_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0.f;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

#if QK_K == 256
        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;
#else
        // TODO
        const float d = 0, dmin = 0;
#endif

        const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));

        const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
        const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
        const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
        summs += dmin * _mm_extract_epi32(hsum, 0);

        const __m128i sc128  = _mm256_extracti128_si256(mins_and_scales, 0);
        const __m256i scales = MM256_SET_M128I(sc128, sc128);

        const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
        __m256i hmask = mone;

        __m256i sumi = _mm256_setzero_si256();

        int bit = 0;

        for (int j = 0; j < QK_K/64; ++j) {

            const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
            const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));

            const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;

            const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
            const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
            const __m256i q5_0  = _mm256_add_epi8(q5l_0, q5h_0);
            hmask = _mm256_slli_epi16(hmask, 1);

            const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
            const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
            const __m256i q5_1  = _mm256_add_epi8(q5l_1, q5h_1);
            hmask = _mm256_slli_epi16(hmask, 1);

            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;

            __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
            __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);

            p16_0 = _mm256_madd_epi16(scale_0, p16_0);
            p16_1 = _mm256_madd_epi16(scale_1, p16_1);

            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
        }

        __m256 vd = _mm256_set1_ps(d);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc) + summs;

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i mzero = _mm_setzero_si128();
    const __m128i mone  = _mm_set1_epi8(1);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0.f;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
        const __m128i scales = _mm_cvtepu8_epi16(utmps);
        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));

        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
        const __m128i prod = _mm_madd_epi16(mins, q8s);
        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
        summs += dmin * _mm_extract_epi32(hsum, 0);

        const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
        const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
        __m128i hmask = mone;

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        int bit = 0;

        __m128i shuffle = _mm_set1_epi16(0x0100);
        for (int j = 0; j < QK_K/64; ++j) {

            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);
            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);

            const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
            const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;

            __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
            __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
            __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
            __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
            __m128i q5_0  = _mm_add_epi8(q5l_0, q5h_0);
            __m128i q5_1  = _mm_add_epi8(q5l_1, q5h_1);
            hmask = _mm_slli_epi16(hmask, 1);

            __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
            p16_0 = _mm_madd_epi16(scale_0, p16_0);
            p16_1 = _mm_madd_epi16(scale_0, p16_1);

            q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
            q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
            q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
            q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
            q5_0  = _mm_add_epi8(q5l_0, q5h_0);
            q5_1  = _mm_add_epi8(q5l_1, q5h_1);
            hmask = _mm_slli_epi16(hmask, 1);

            q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
            __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
            p16_2 = _mm_madd_epi16(scale_1, p16_2);
            p16_3 = _mm_madd_epi16(scale_1, p16_3);

            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
        }

        __m256 vd = _mm256_set1_ps(d);
        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc) + summs;

#elif defined __riscv_v_intrinsic

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    float sumf = 0;
    float sums = 0.0;

    size_t vl;

    for (int i = 0; i < nb; ++i) {

        vl = 8;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict hm = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;

        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;

        vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
        vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
        vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
        vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
        vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);

        vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
        sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);

        vl = 32;
        int32_t aux32 = 0;
        int is = 0;

        uint8_t m = 1;
        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
        vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);

        for (int j = 0; j < QK_K/64; ++j) {
            // load Q5 and Q8
            vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
            vint8m1_t  q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
            vint8m1_t  q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);

            // compute mask for addition
            vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
            vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
            vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
            m <<= 1;

            vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
            vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
            vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
            m <<= 1;

            vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
            vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);

            vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
            vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);

            vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
            vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);

            aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
            q5 += 32;    q8 += 64;
        }

        vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
        sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
    }

    *s = sumf+sums;

#else

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const uint8_t * restrict hm = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        uint8_t m = 1;
        for (int j = 0; j < QK_K/64; ++j) {
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
            a += 32; m <<= 1;
            q4 += 32;
        }
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;
        int sumi = 0;
        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            int32_t scale = scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#else
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);
    const uint8x16_t mh = vdupq_n_u8(16);
    const int32x4_t mzero = vdupq_n_s32(0);

    ggml_int8x16x4_t q5bytes;
    ggml_uint8x16x4_t q5h;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * (float)x[i].d;
        const int8_t * sc = x[i].scales;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const uint8x8_t qhbits = vld1_u8(qh);

        const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
        const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);

        const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
        q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
        q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
        q5h.val[2] = vbicq_u8(mh, htmp);
        q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));

        q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
        q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
        q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
        q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
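        // vbic leaves a 16 wherever the fifth bit is *clear*, so the subtraction above yields
        // nibble - 16 for those values and the plain nibble otherwise, i.e. the full 5-bit quant
        // re-centered into the signed range [-16, 15].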

        int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
        int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
        int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
        int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));

        sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
    }

    *s = sumf;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m256i mone  = _mm256_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);

        const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
        const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));

        int64_t aux64;
        memcpy(&aux64, x[i].qh, 8);
        const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
        const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);

        const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
        const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);

        const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
        const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
        const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
        const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
        const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));

        const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));

        acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i mone  = _mm_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);

        const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
        const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
        const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
        const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);

        int64_t aux64;
        memcpy(&aux64, x[i].qh, 8);
        const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
        const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);

        const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
        const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
        const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
        const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);

        const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
        const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
        const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
        const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
        const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
        const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
        const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
        const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
        const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
        const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
        const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));

        const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
        const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));

        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * (float)x[i].d;
        const int8_t * sc = x[i].scales;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        // load qh
        vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
        vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));

        size_t vl = 16;

        // combine both qh_1 and qh_2
        vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
        vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
        vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
        vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
        vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);

        vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
        vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
        vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
        vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);

        // load q5
        vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
        vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);

        vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
        vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
        vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
        vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));

        vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
        vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
        vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
        vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);

        // load Q8 and multiply it with Q5
        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

        vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
        vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
        vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
        vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

        int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
        int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
        int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
        int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);

        sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
    }

    *s = sumf;
  6205. #else
  6206. int8_t aux8[QK_K];
  6207. int16_t aux16[16];
  6208. float sums [8];
  6209. memset(sums, 0, 8*sizeof(float));
  6210. float sumf = 0;
  6211. for (int i = 0; i < nb; ++i) {
  6212. const uint8_t * restrict q4 = x[i].qs;
  6213. const uint8_t * restrict hm = x[i].qh;
  6214. const int8_t * restrict q8 = y[i].qs;
  6215. int8_t * restrict a = aux8;
  6216. for (int l = 0; l < 32; ++l) {
  6217. a[l+ 0] = q4[l] & 0xF;
  6218. a[l+32] = q4[l] >> 4;
  6219. }
  6220. for (int is = 0; is < 8; ++is) {
  6221. uint8_t m = 1 << is;
  6222. for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
  6223. }
  6224. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6225. const int8_t * restrict sc = x[i].scales;
  6226. for (int j = 0; j < QK_K/16; ++j) {
  6227. const float dl = d * sc[j];
  6228. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  6229. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
  6230. q8 += 16; a += 16;
  6231. }
  6232. }
  6233. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6234. *s = sumf;
  6235. #endif
  6236. }
  6237. #endif
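
// Dot product of a row of q6_K-quantized weights with a q8_K-quantized
// activation vector: each 6-bit weight is split into a 4-bit low part (ql)
// and a 2-bit high part (qh), and is dequantized as (q - 32) times a
// per-16-element int8 scale and the per-block fp16 super-scale d.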
#if QK_K == 256
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q6_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    float sum = 0;

    const uint8x16_t m4b = vdupq_n_u8(0xF);
    const int32x4_t  vzero = vdupq_n_s32(0);
    //const int8x16_t  m32s = vdupq_n_s8(32);
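    // Note: the -32 offset of q6_K is not subtracted per element here; it is
    // factored out through the precomputed q8 block sums (isum_mins below),
    // which is why m32s and the vsubq_s8 variants are left commented out.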
    const uint8x16_t mone = vdupq_n_u8(3);

    ggml_int8x16x4_t q6bytes;
    ggml_uint8x16x4_t q6h;

    for (int i = 0; i < nb; ++i) {

        const float d_all = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
        const int8x16_t scales = vld1q_s8(scale);
        const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};

        const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
                                                   vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
                                         vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
                                                   vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
        int32_t isum_mins = vaddvq_s32(prod);

        int32_t isum = 0;

        for (int j = 0; j < QK_K/128; ++j) {

            ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
            ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
            ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;

            q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
            q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
            uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
            q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[1], 2);
            q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);

            //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
            //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
            //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
            //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
            q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
            q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
            q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
            q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));

            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];

            scale += 4;

            q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;

            shifted = vshrq_n_u8(qhbits.val[0], 4);
            q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[1], 4);
            q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[0], 6);
            q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[1], 6);
            q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);

            //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
            //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
            //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
            //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
            q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
            q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
            q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
            q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));

            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
            scale += 4;
        }
        //sum += isum * d_all * y[i].d;
        sum += d_all * y[i].d * (isum - 32 * isum_mins);
    }
    *s = sum;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m256i m2 = _mm256_set1_epi8(3);
    const __m256i m32s = _mm256_set1_epi8(32);
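    // _mm256_maddubs_epi16 treats its first operand as unsigned bytes, so the
    // 6-bit values are multiplied as-is and the -32 offset is restored after
    // the fact: p16 = maddubs(q, q8) - maddubs(32, q8) gives (q - 32) * q8
    // per 16-bit lane.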
    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);

        __m256i sumi = _mm256_setzero_si256();

        int is = 0;

        for (int j = 0; j < QK_K/128; ++j) {

            const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
            const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
            const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
            const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
            is += 4;

            const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
            const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
            const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;

            const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
            const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
            const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
            const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);

            const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
            const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
            const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
            const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);

            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;

            __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
            __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
            __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
            __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);

            __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
            __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
            __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
            __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);

            p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm256_sub_epi16(p16_3, q8s_3);

            p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
            p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
            p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
            p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);

            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
        }

        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m3 = _mm_set1_epi8(3);
    const __m128i m32s = _mm_set1_epi8(32);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
        for (int j = 0; j < QK_K/128; ++j) {

            const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
            const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;

            const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
            const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
            const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
            const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
            const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
            const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);

            const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;

            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);

            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;

            __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
            __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
            __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
            __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
            __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
            __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
            __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
            __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);

            __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
            __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
            __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
            __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
            __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
            __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
            __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);

            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
            p16_7 = _mm_sub_epi16(p16_7, q8s_7);

            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);

            p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
            p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
            p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
            p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);

            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
        }

        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {

        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        size_t vl;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        int sum_t = 0;
        int is = 0;

        for (int j = 0; j < QK_K/128; ++j) {

            vl = 32;

            // load qh
            vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);

            // load Q6
            vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
            vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);

            vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
            vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
            vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
            vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);

            vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
            vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03, vl);
            vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03, vl);
            vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03, vl);

            vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
            vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
            vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
            vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);

            vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
            vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
            vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
            vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);

            // load Q8 and take product
            vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
            vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
            vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
            vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);

            vl = 16;

            vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
            vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
            vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
            vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
            vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
            vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
            vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
            vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);

            vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
            vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
            vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
            vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);

            sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);

            q6 += 64; qh += 32; q8 += 128; is = 8;
        }

        sumf += d * sum_t;
    }

    *s = sumf;

#else
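
    // Reference scalar path: expand each block into plain int8 values in aux8
    // (low nibble | two high bits, minus 32), then accumulate the dot product
    // 16 elements at a time, weighted by the per-sub-block int8 scale.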
    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                a[l +  0] = (int8_t)((q4[l +  0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
                a[l + 64] = (int8_t)((q4[l +  0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32;
                a[l + 96] = (int8_t)((q4[l + 32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32;
            }
            a  += 128;
            q4 += 64;
            qh += 32;
        }
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#else
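
// QK_K == 64 variant: a super-block is a single 64-element block, so only
// four int8 scales are needed and the whole block is processed in one pass.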
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q6_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    float sum = 0;

    const uint8x16_t m4b = vdupq_n_u8(0xF);
    const int8x16_t  m32s = vdupq_n_s8(32);
    const int32x4_t  vzero = vdupq_n_s32(0);

    const uint8x16_t mone = vdupq_n_u8(3);

    ggml_int8x16x4_t q6bytes;
    ggml_uint8x16x4_t q6h;

    for (int i = 0; i < nb; ++i) {

        const float d_all = (float)x[i].d;

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        int32_t isum = 0;

        uint8x16_t qhbits = vld1q_u8(qh);
        ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
        ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);

        q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
        uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
        q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
        shifted = vshrq_n_u8(qhbits, 4);
        q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
        shifted = vshrq_n_u8(qhbits, 6);
        q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);

        q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
        q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
        q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
        q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);

        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
                vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
                vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
                vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];

        sum += isum * d_all * y[i].d;
    }
    *s = sum;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m256i m2 = _mm256_set1_epi8(3);
    const __m256i m32s = _mm256_set1_epi8(32);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
        const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
        const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
        const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);

        __m256i sumi = _mm256_setzero_si256();

        const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
        const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);

        const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
        const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);

        const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
        const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);

        const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
        const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
        __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);

        __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
        __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);

        p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
        p16_1 = _mm256_sub_epi16(p16_1, q8s_1);

        p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
        p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);

        sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));

        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(3);
    const __m128i m32s = _mm_set1_epi8(32);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
        const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
        const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
        const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
        const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);

        const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
        const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);

        const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
        const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
        const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
        const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);

        const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
        const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
        const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
        const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
        __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
        __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
        __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));

        __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
        __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
        __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
        __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));

        p16_0 = _mm_sub_epi16(p16_0, q8s_0);
        p16_1 = _mm_sub_epi16(p16_1, q8s_1);
        p16_2 = _mm_sub_epi16(p16_2, q8s_2);
        p16_3 = _mm_sub_epi16(p16_3, q8s_3);

        p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
        p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
        p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
        p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);

        sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
        sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));

        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d_all = (float)x[i].d;

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        int32_t isum = 0;

        size_t vl = 16;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        // load Q6
        vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
        vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);

        // load qh
        vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);

        vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
        qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
        vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
        qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
        vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
        qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
        vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);

        vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
        vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
        vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
        vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);

        vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
        vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
        vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
        vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);

        // load Q8 and take product
        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

        vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
        vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
        vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
        vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

        isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];

        sumf += isum * d_all * y[i].d;
    }

    *s = sumf;

#else

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int l = 0; l < 16; ++l) {
            a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
            a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
            a[l+32] = (int8_t)((q4[l+ 0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32;
            a[l+48] = (int8_t)((q4[l+16] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32;
        }
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#endif
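
// Sign table shared by the iq2/iq3 dot products below: entry k (0 <= k < 128)
// holds eight int8 factors in {+1, -1} whose first seven signs are the bits
// of k and whose eighth sign makes the total count of -1 factors even, so
// only 7 sign bits need to be stored per group of 8 weights. The flat layout
// also lets the table be read as 128 uint64_t values.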
static const int8_t keven_signs_q2xs[1024] = {
    1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
    1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
    1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
    1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
    1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
    1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
    1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
    1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
    1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
    1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
    1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
    1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
    1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
    1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
    1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
    1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
    1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
    1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
    1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
    1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
    1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
    1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
    1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
    1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
    1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
    1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
    1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
    1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
    1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
    1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
    1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
    1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
};
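
// Dot product of a row of iq2_xxs-quantized weights with a q8_K vector.
// Each group of 8 weights is an entry of the iq2xxs_grid codebook with
// per-element signs taken from keven_signs_q2xs; each 32-element sub-block
// has a 4-bit scale ls applied as 2*ls + 1, and a constant 0.125f factor
// (0.25f with the equivalent 0.5f + ls form) normalizes the result.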
void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_xxs * restrict x = vx;
    const block_q8_K    * restrict y = vy;

    const int nb = n / QK_K;

#if defined(__ARM_NEON)

    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    ggml_int8x16x4_t q2u;
    ggml_int8x16x4_t q2s;
    ggml_int8x16x4_t q8b;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        float sumf1 = 0, sumf2 = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
            q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
            q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
            q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >>  0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >>  7) & 127))));
            q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
            q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >>  0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >>  7) & 127))));
            q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
            q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
            q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
            q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
            q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
            sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
            sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
        }
        sumf += d*(sumf1 + sumf2);
    }
    *s = 0.25f * sumf;

#elif defined(__AVX2__)

    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    __m256 accumf = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        __m256i sumi1 = _mm256_setzero_si256();
        __m256i sumi2 = _mm256_setzero_si256();
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
            const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
            const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
            const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
                                                   signs64[(aux32[1] >>  7) & 127], signs64[(aux32[1] >>  0) & 127]);
            const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
                                                   signs64[(aux32[3] >>  7) & 127], signs64[(aux32[3] >>  0) & 127]);
            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
            const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
            const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
            const uint16_t ls1 = aux32[1] >> 28;
            const uint16_t ls2 = aux32[3] >> 28;
            const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
            const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
            sumi1 = _mm256_add_epi32(sumi1, p1);
            sumi2 = _mm256_add_epi32(sumi2, p2);
        }
        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
    }
    *s = 0.125f * hsum_float_8(accumf);

#else

    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(aux32, q2, 2*sizeof(uint32_t));
            q2 += 4;
            const uint32_t ls = 2*(aux32[1] >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
                const uint8_t  signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}
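
// Dot product of a row of iq2_xs-quantized weights with a q8_K vector.
// Each 16-bit entry packs a 9-bit index into the 512-entry iq2xs_grid
// codebook plus 7 explicit sign bits (the 8th sign is the parity bit of
// the even-signs scheme); 4-bit sub-block scales are applied as 2*ls + 1.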
void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_xs * restrict x = vx;
    const block_q8_K   * restrict y = vy;

    const int nb = n / QK_K;

#if defined(__ARM_NEON)

    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    ggml_int8x16x4_t q2u;
    ggml_int8x16x4_t q2s;
    ggml_int8x16x4_t q8b;

    int32x4x4_t scales32;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        const uint8x8_t scales8 = vld1_u8(x[i].scales);
        const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
        const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
        uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
        scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
        const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
        const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
        scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16 (scales1)));
        scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
        scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16 (scales2)));
        scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
        int32x4_t sumi = vdupq_n_s32(0);
        for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
            q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
            q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
            q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
            q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
            q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
            q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
            q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
            q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
            q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
            q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
            const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
            const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
            const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
            const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
            const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
            sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
            q2 += 8;
        }
        sumf += d*vaddvq_s32(sumi);
    }
    *s = 0.125f * sumf;

#elif defined(__AVX2__)

    const __m128i m4 = _mm_set1_epi8(0xf);
    const __m128i m1 = _mm_set1_epi8(1);
    const __m256i m511 = _mm256_set1_epi16(511);
    const __m256i mone = _mm256_set1_epi8(1);
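
    // k_bit_helper maps a 4-bit value to 0x80 when its popcount is odd.
    // Shuffling the (xor-folded) stored sign bits through it recovers the
    // implicit 8th parity sign of each group, which is OR-ed back into
    // full_sign_bits below so that all 8 signs become explicit.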
    static const uint8_t k_bit_helper[32] = {
        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
    };
    static const char block_sign_shuffle_mask_1[32] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    };
    static const char block_sign_shuffle_mask_2[32] = {
        0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
        0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
    };
    static const uint8_t bit_selector_mask_bytes[32] = {
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    };

    const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
    const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
    const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
    const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);

    uint64_t aux64;

    // somewhat hacky, but gives a significant boost in performance
    __m256i aux_gindex;
    const uint16_t * gindex = (const uint16_t *)&aux_gindex;

    __m256 accumf = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;

        memcpy(&aux64, x[i].scales, 8);
        __m128i stmp = _mm_set1_epi64x(aux64);
        stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
        const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);

        __m256i sumi1 = _mm256_setzero_si256();
        __m256i sumi2 = _mm256_setzero_si256();
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {

            const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16;
            aux_gindex = _mm256_and_si256(q2_data, m511);

            const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
            const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
            const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);

            const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
            const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);

            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;

            const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
                                                   iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
            const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
                                                   iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
            const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
                                                   iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
            const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
                                                   iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);

            const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
            const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
            const __m256i full_signs_1 = _mm256_set_m128i(full_signs_l, full_signs_l);
            const __m256i full_signs_2 = _mm256_set_m128i(full_signs_h, full_signs_h);

            __m256i signs;
            signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));

            signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));

            signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));

            signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));

            const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
            const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
            const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
            const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);

            const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
            const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
            const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
            const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));

            sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
            sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
            sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
            sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
        }

        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
    }
    *s = 0.125f * hsum_float_8(accumf);

#else

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const uint8_t  * restrict sc = x[i].scales;
        const int8_t   * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
            const uint16_t ls2 = 2*(sc[ib32] >>  4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t  signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t  signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls2;
            q2 += 4;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}

// TODO
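// Dot product of a row of iq3_xxs-quantized weights with a q8_K vector:
// each byte of qs indexes the iq3xxs_grid codebook (4 weights per entry),
// the sign words stored after the indices (gas) select signs via
// keven_signs_q2xs, and 4-bit sub-block scales are applied as 0.5f + ls.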
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq3_xxs * restrict x = vx;
    const block_q8_K    * restrict y = vy;

    const int nb = n / QK_K;

#if defined(__ARM_NEON)

    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    uint32_t aux32[2];

    ggml_int8x16x4_t q3s;
    ggml_int8x16x4_t q8b;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict gas = x[i].qs + QK_K/4;
        const int8_t  * restrict q8 = y[i].qs;
        float sumf1 = 0, sumf2 = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
            const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]);
            const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]);
            const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]);
            const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]);
            q3 += 16;
            q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >>  0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >>  7) & 127))));
            q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
            q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >>  0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >>  7) & 127))));
            q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
            q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
            q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
            q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
            q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
            sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
            sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
        }
        sumf += d*(sumf1 + sumf2);
    }
    *s = 0.5f * sumf;

#elif defined(__AVX2__)

    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    uint32_t aux32[2];

    __m256 accumf = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict gas = x[i].qs + QK_K/4;
        const int8_t  * restrict q8 = y[i].qs;
        __m256i sumi1 = _mm256_setzero_si256();
        __m256i sumi2 = _mm256_setzero_si256();
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
                                                  iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
            q3 += 8;
            const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
                                                  iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
            q3 += 8;
            memcpy(aux32, gas, 8); gas += 8;
            const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127],
                                                   signs64[(aux32[0] >>  7) & 127], signs64[(aux32[0] >>  0) & 127]);
            const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
                                                   signs64[(aux32[1] >>  7) & 127], signs64[(aux32[1] >>  0) & 127]);
            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
            const __m256i dot1  = _mm256_maddubs_epi16(q2_1, q8s_1);
            const __m256i dot2  = _mm256_maddubs_epi16(q2_2, q8s_2);
            const uint16_t ls1 = aux32[0] >> 28;
            const uint16_t ls2 = aux32[1] >> 28;
            const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
            const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
            sumi1 = _mm256_add_epi32(sumi1, p1);
            sumi2 = _mm256_add_epi32(sumi2, p2);
        }

        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
    }

    *s = 0.25f * hsum_float_8(accumf);

#else

    uint32_t aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict gas = x[i].qs + QK_K/4;
        const int8_t  * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
            const uint32_t ls = 2*(aux32 >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
                const uint8_t  signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            q3 += 8;
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.25f * sumf;
#endif
}

// ================================ IQ2 quantization =============================================
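// Per-grid-size lookup tables used by the quantizers below:
//   grid       - the codebook points, 8 int8 lattice coordinates packed into each uint64
//   map        - for every possible packed coordinate index: the position in `grid`,
//                or -(offset+1) into `neighbours` when the point is not in the codebook
//   neighbours - flat runs of [count, index_0, ..., index_{count-1}] listing the nearest
//                codebook points of every off-grid position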
typedef struct {
    uint64_t * grid;
    int      * map;
    uint16_t * neighbours;
} iq2_entry_t;

static iq2_entry_t iq2_data[2] = {
    {NULL, NULL, NULL},
    {NULL, NULL, NULL},
};

static inline int iq2_data_index(int grid_size) {
    GGML_ASSERT(grid_size == 256 || grid_size == 512);
    return grid_size == 256 ? 0 : 1;
}

static int iq2_compare_func(const void * left, const void * right) {
    const int * l = (const int *)left;
    const int * r = (const int *)right;
    return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
}

void iq2xs_init_impl(int grid_size) {
    const int gindex = iq2_data_index(grid_size);
    if (iq2_data[gindex].grid) {
        return;
    }
    static const uint16_t kgrid_256[256] = {
            0,     2,     5,     8,    10,    17,    20,    32,    34,    40,    42,    65,    68,    80,    88,    97,
          100,   128,   130,   138,   162,   257,   260,   272,   277,   320,   388,   408,   512,   514,   546,   642,
         1025,  1028,  1040,  1057,  1060,  1088,  1090,  1096,  1120,  1153,  1156,  1168,  1188,  1280,  1282,  1288,
         1312,  1350,  1385,  1408,  1425,  1545,  1552,  1600,  1668,  1700,  2048,  2053,  2056,  2068,  2088,  2113,
         2116,  2128,  2130,  2184,  2308,  2368,  2562,  2580,  4097,  4100,  4112,  4129,  4160,  4192,  4228,  4240,
         4245,  4352,  4360,  4384,  4432,  4442,  4480,  4644,  4677,  5120,  5128,  5152,  5157,  5193,  5248,  5400,
         5474,  5632,  5654,  6145,  6148,  6160,  6208,  6273,  6400,  6405,  6560,  6737,  8192,  8194,  8202,  8260,
         8289,  8320,  8322,  8489,  8520,  8704,  8706,  9217,  9220,  9232,  9280,  9302,  9472,  9537,  9572,  9872,
        10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
        16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
        17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
        20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
        22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
        25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
        33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
        37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
    };
    static const uint16_t kgrid_512[512] = {
            0,     2,     5,     8,    10,    17,    20,    22,    25,    32,    34,    37,    40,    65,    68,    70,
           73,    80,    82,    85,    88,    97,   100,   128,   130,   133,   136,   145,   148,   153,   160,   257,
          260,   262,   265,   272,   274,   277,   280,   282,   289,   292,   320,   322,   325,   328,   337,   340,
          352,   360,   385,   388,   400,   512,   514,   517,   520,   529,   532,   544,   577,   580,   592,   597,
          640,   650,  1025,  1028,  1030,  1033,  1040,  1042,  1045,  1048,  1057,  1060,  1088,  1090,  1093,  1096,
         1105,  1108,  1110,  1120,  1153,  1156,  1168,  1280,  1282,  1285,  1288,  1297,  1300,  1312,  1345,  1348,
         1360,  1377,  1408,  1537,  1540,  1552,  1574,  1600,  1602,  1668,  2048,  2050,  2053,  2056,  2058,  2065,
         2068,  2080,  2085,  2113,  2116,  2128,  2136,  2176,  2208,  2218,  2305,  2308,  2320,  2368,  2433,  2441,
         2560,  2592,  2600,  2710,  2720,  4097,  4100,  4102,  4105,  4112,  4114,  4117,  4120,  4129,  4132,  4160,
         4162,  4165,  4168,  4177,  4180,  4192,  4202,  4225,  4228,  4240,  4352,  4354,  4357,  4360,  4369,  4372,
         4384,  4417,  4420,  4432,  4480,  4500,  4502,  4609,  4612,  4614,  4624,  4672,  4704,  5120,  5122,  5125,
         5128,  5137,  5140,  5152,  5185,  5188,  5193,  5200,  5220,  5248,  5377,  5380,  5392,  5440,  5632,  5652,
         5705,  6145,  6148,  6160,  6162,  6208,  6228,  6278,  6400,  6405,  6502,  6737,  6825,  8192,  8194,  8197,
         8200,  8202,  8209,  8212,  8224,  8257,  8260,  8272,  8320,  8352,  8449,  8452,  8464,  8512,  8520,  8549,
         8704,  8738,  8832,  8872,  9217,  9220,  9232,  9257,  9280,  9472,  9537,  9554,  9625,  9729,  9754,  9894,
        10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
        16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
        16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
        16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
        17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
        18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
        20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
        21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
        22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
        24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
        32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
        33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
        33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
        35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
        37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
        40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
        42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
    };
    const int kmap_size = 43692;
    const int nwant = 2;
    const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512;
    uint64_t * kgrid_q2xs;
    int      * kmap_q2xs;
    uint16_t * kneighbors_q2xs;

    printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
    uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
    for (int k = 0; k < grid_size; ++k) {
        int8_t * pos = (int8_t *)(the_grid + k);
        for (int i = 0; i < 8; ++i) {
            int l = (kgrid[k] >> 2*i) & 0x3;
            pos[i] = 2*l + 1;
        }
    }
    kgrid_q2xs = the_grid;
    iq2_data[gindex].grid = the_grid;
    kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
    iq2_data[gindex].map = kmap_q2xs;
    for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
    uint64_t aux64;
    uint8_t * aux8 = (uint8_t *)&aux64;
    for (int i = 0; i < grid_size; ++i) {
        aux64 = kgrid_q2xs[i];
        uint16_t index = 0;
        for (int k=0; k<8; ++k) {
            uint16_t q = (aux8[k] - 1)/2;
            index |= (q << 2*k);
        }
        kmap_q2xs[index] = i;
    }
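    // First pass: for every point that is not in the codebook, count how many nearest
    // neighbours it gets, so the flat neighbours table can be allocated in one block.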
    int8_t pos[8];
    int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
    int num_neighbors = 0, num_not_in_map = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q2xs[i] >= 0) continue;
        ++num_not_in_map;
        for (int k = 0; k < 8; ++k) {
            int l = (i >> 2*k) & 0x3;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
            int d2 = 0;
            for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
        int n = 0; int d2 = dist2[0];
        int nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            ++n;
        }
        num_neighbors += n;
    }
    printf("%s: %d neighbours in total\n", __func__, num_neighbors);
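    // Second pass: fill the table. Each run starts with its length, and the map entry of
    // the off-grid point is replaced by -(offset+1) of that run within the table.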
    kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
    iq2_data[gindex].neighbours = kneighbors_q2xs;
    int counter = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q2xs[i] >= 0) continue;
        for (int k = 0; k < 8; ++k) {
            int l = (i >> 2*k) & 0x3;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
            int d2 = 0;
            for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
        kmap_q2xs[i] = -(counter + 1);
        int d2 = dist2[0];
        uint16_t * start = &kneighbors_q2xs[counter++];
        int n = 0, nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            kneighbors_q2xs[counter++] = dist2[2*j+1];
            ++n;
        }
        *start = n;
    }
    free(dist2);
}

void iq2xs_free_impl(int grid_size) {
    GGML_ASSERT(grid_size == 256 || grid_size == 512);
    const int gindex = iq2_data_index(grid_size);
    if (iq2_data[gindex].grid) {
        free(iq2_data[gindex].grid);       iq2_data[gindex].grid = NULL;
        free(iq2_data[gindex].map);        iq2_data[gindex].map = NULL;
        free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
    }
}

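// Among the precomputed nearest codebook points of an off-grid position, pick the one that
// minimizes the weighted squared error to xval at the given scale, and store its 2-bit
// coordinates in L.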
static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
        const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
    int num_neighbors = neighbours[0];
    GGML_ASSERT(num_neighbors > 0);
    float best_d2 = FLT_MAX;
    int grid_index = -1;
    for (int j = 1; j <= num_neighbors; ++j) {
        const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
        float d2 = 0;
        for (int i = 0; i < 8; ++i) {
            float q = pg[i];
            float diff = scale*q - xval[i];
            d2 += weight[i]*diff*diff;
        }
        if (d2 < best_d2) {
            best_d2 = d2; grid_index = neighbours[j];
        }
    }
    GGML_ASSERT(grid_index >= 0);
    const int8_t * pg = (const int8_t *)(grid + grid_index);
    for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
    return grid_index;
}

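// Quantize one row to iq2_xxs. For every group of 8 values the signs are folded into a
// 7-bit mask (the 8th sign is implied by even parity; an odd flip count is repaired at the
// least important position), the magnitudes are snapped to the 256-point codebook with a
// small search over candidate scales, and each 32-value block is stored as four 8-bit grid
// indices plus one 32-bit word holding 4x7 sign bits and a 4-bit block scale.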
static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {

    const int gindex = iq2_data_index(256);

    const uint64_t * kgrid_q2xs      = iq2_data[gindex].grid;
    const int      * kmap_q2xs       = iq2_data[gindex].map;
    const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;

    GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kgrid_q2xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kmap_q2xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);

    const int kMaxQ = 3;
    const int nbl = n/QK_K;
    block_iq2_xxs * y = vy;

    float scales[QK_K/32];
    float weight[32];
    float xval[32];
    int8_t L[32];
    int8_t Laux[32];
    float  waux[32];
    uint8_t block_signs[4];
    uint32_t q2[2*(QK_K/32)];

    for (int ibl = 0; ibl < nbl; ++ibl) {

        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        memset(q2, 0, QK_K/4);

        float max_scale = 0;

        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = sumx2/QK_K;

        for (int ib = 0; ib < QK_K/32; ++ib) {
            const float * xb = xbl + 32*ib;
            const float * qw = quant_weights + QK_K*ibl + 32*ib;
            for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 4; ++k) {
                int nflip = 0;
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
                    }
                }
                if (nflip%2) {
                    int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
                    for (int i = 1; i < 8; ++i) {
                        float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
                        if (ax < min) {
                            min = ax; imin = i;
                        }
                    }
                    xval[8*k+imin] = -xval[8*k+imin];
                    s ^= (1 << imin);
                }
                block_signs[k] = s & 127;
            }
            float max = xval[0];
            for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                memset(L, 0, 32);
                continue;
            }
            float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight);
            float eff_max = scale*kMaxQ;
            float best = 0;
            for (int is = -6; is <= 6; ++is) {
                float id = (2*kMaxQ-1+is*0.1f)/eff_max;
                float this_scale = 1/id;
                for (int k = 0; k < 4; ++k) {
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
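                        // kmap entry is -(offset+1): recover the offset of this point's run in the neighbours table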
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    memcpy(L, Laux, 32);
                }
            }
            if (scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 4; ++k) {
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 2*i);
                    }
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
                    }
                    const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
                    for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
                // and correspondingly flip quant signs.
                scale = -scale;
                for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
            }
            for (int k = 0; k < 4; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
                int grid_index = kmap_q2xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                q2[2*ib+0] |= (grid_index << 8*k);
                q2[2*ib+1] |= (block_signs[k] << 7*k);
            }
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }

        if (!max_scale) {
            memset(y[ibl].qs, 0, QK_K/4);
            continue;
        }

        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d);
        float id = 1/d;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            q2[2*ib+1] |= ((uint32_t)l << 28);
        }
        memcpy(y[ibl].qs, q2, QK_K/4);
    }
}

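// Same scheme as iq2_xxs, but with the 512-point codebook, a 4-bit scale for every 16
// values, and each group of 8 stored as one uint16: a 9-bit grid index plus 7 sign bits.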
static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {

    const int gindex = iq2_data_index(512);

    const uint64_t * kgrid_q2xs      = iq2_data[gindex].grid;
    const int      * kmap_q2xs       = iq2_data[gindex].map;
    const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;

    GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kmap_q2xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kgrid_q2xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);

    const int kMaxQ = 3;
    const int nbl = n/QK_K;
    block_iq2_xs * y = vy;

    float scales[QK_K/16];
    float weight[16];
    float xval[16];
    int8_t L[16];
    int8_t Laux[16];
    float  waux[16];
    bool   is_on_grid[2];
    bool   is_on_grid_aux[2];
    uint8_t block_signs[2];
    uint16_t q2[2*(QK_K/16)];

    for (int ibl = 0; ibl < nbl; ++ibl) {

        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        memset(q2, 0, QK_K/4);
        memset(y[ibl].scales, 0, QK_K/32);

        float max_scale = 0;

        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = sumx2/QK_K;

        for (int ib = 0; ib < QK_K/16; ++ib) {
            const float * xb = xbl + 16*ib;
            const float * qw = quant_weights + QK_K*ibl + 16*ib;
            for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 2; ++k) {
                int nflip = 0;
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
                    }
                }
                if (nflip%2) {
                    int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
                    for (int i = 1; i < 8; ++i) {
                        float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
                        if (ax < min) {
                            min = ax; imin = i;
                        }
                    }
                    xval[8*k+imin] = -xval[8*k+imin];
                    s ^= (1 << imin);
                }
                block_signs[k] = s & 127;
            }
            float max = xval[0];
            for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                memset(L, 0, 16);
                continue;
            }
            float best = 0;
            float scale = max/(2*kMaxQ-1);
            is_on_grid[0] = is_on_grid[1] = true;
            for (int is = -9; is <= 9; ++is) {
                float id = (2*kMaxQ-1+is*0.1f)/max;
                float this_scale = 1/id;
                for (int k = 0; k < 2; ++k) {
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
                    int grid_index = kmap_q2xs[u];
                    is_on_grid_aux[k] = true;
                    if (grid_index < 0) {
                        is_on_grid_aux[k] = false;
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 16; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    for (int i = 0; i < 16; ++i) L[i] = Laux[i];
                    for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
                }
            }
            int n_not_ongrid = 0;
            for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
            if (n_not_ongrid > 0 && scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 2; ++k) {
                    if (is_on_grid[k]) continue;
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 2*i);
                        L[8*k + i] = l;
                    }
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 16; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                scale = -scale;
                for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
            }
            for (int k = 0; k < 2; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
                int grid_index = kmap_q2xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                q2[2*ib+k] = grid_index | (block_signs[k] << 9);
            }
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }

        if (!max_scale) {
            memset(y[ibl].qs, 0, QK_K/4);
            continue;
        }

        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d);
        float id = 1/d;
        for (int ib = 0; ib < QK_K/16; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            if (ib%2 == 0) y[ibl].scales[ib/2] = l;
            else y[ibl].scales[ib/2] |= (l << 4);
        }
        memcpy(y[ibl].qs, q2, QK_K/4);
    }
}

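// Row-wise entry points. The hist argument is accepted for API compatibility but unused.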
size_t quantize_iq2_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq2_xxs);
    }
    return nrow * nblock * sizeof(block_iq2_xxs);
}

size_t quantize_iq2_xs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq2_xs);
    }
    return nrow * nblock * sizeof(block_iq2_xs);
}

//
// ============================================= 3-bit using D4 lattice
//
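// Codebook of 256 points with 4 coordinates each; every coordinate uses 3 bits and is stored
// as the odd value 2*l + 1 in {1, ..., 15}, so a packed index fits in 12 bits (kmap_size = 4096).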
typedef struct {
    uint32_t * grid;
    int      * map;
    uint16_t * neighbours;
} iq3_entry_t;

static iq3_entry_t iq3_data[1] = {
    {NULL, NULL, NULL},
};

static inline int iq3_data_index(int grid_size) {
    (void)grid_size;
    GGML_ASSERT(grid_size == 256);
    return 0;
}

static int iq3_compare_func(const void * left, const void * right) {
    const int * l = (const int *)left;
    const int * r = (const int *)right;
    return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
}

void iq3xs_init_impl(int grid_size) {
    const int gindex = iq3_data_index(grid_size);
    if (iq3_data[gindex].grid) {
        return;
    }
    static const uint16_t kgrid_256[256] = {
            0,     2,     4,     9,    11,    15,    16,    18,    25,    34,    59,    61,    65,    67,    72,    74,
           81,    85,    88,    90,    97,   108,   120,   128,   130,   132,   137,   144,   146,   153,   155,   159,
          169,   175,   189,   193,   199,   200,   202,   213,   248,   267,   287,   292,   303,   315,   317,   321,
          327,   346,   362,   413,   436,   456,   460,   462,   483,   497,   513,   515,   520,   522,   529,   531,
          536,   538,   540,   551,   552,   576,   578,   585,   592,   594,   641,   643,   648,   650,   657,   664,
          698,   704,   706,   720,   729,   742,   758,   769,   773,   808,   848,   852,   870,   889,   901,   978,
          992,  1024,  1026,  1033,  1035,  1040,  1042,  1046,  1049,  1058,  1089,  1091,  1093,  1096,  1098,  1105,
         1112,  1139,  1143,  1144,  1152,  1154,  1161,  1167,  1168,  1170,  1183,  1184,  1197,  1217,  1224,  1228,
         1272,  1276,  1309,  1323,  1347,  1367,  1377,  1404,  1473,  1475,  1486,  1509,  1537,  1544,  1546,  1553,
         1555,  1576,  1589,  1594,  1600,  1602,  1616,  1625,  1636,  1638,  1665,  1667,  1672,  1685,  1706,  1722,
         1737,  1755,  1816,  1831,  1850,  1856,  1862,  1874,  1901,  1932,  1950,  1971,  2011,  2032,  2052,  2063,
         2077,  2079,  2091,  2095,  2172,  2192,  2207,  2208,  2224,  2230,  2247,  2277,  2308,  2345,  2356,  2389,
         2403,  2424,  2501,  2504,  2506,  2520,  2570,  2593,  2616,  2624,  2630,  2646,  2669,  2700,  2714,  2746,
         2754,  2795,  2824,  2835,  2839,  2874,  2882,  2905,  2984,  3028,  3042,  3092,  3108,  3110,  3124,  3153,
         3185,  3215,  3252,  3288,  3294,  3364,  3397,  3434,  3483,  3523,  3537,  3587,  3589,  3591,  3592,  3610,
         3626,  3670,  3680,  3722,  3749,  3754,  3776,  3789,  3803,  3824,  3857,  3873,  3904,  3906,  3924,  3992,
    };
    const int kmap_size = 4096;
    const int nwant = 2;
    const uint16_t * kgrid = kgrid_256;
    uint32_t * kgrid_q3xs;
    int      * kmap_q3xs;
    uint16_t * kneighbors_q3xs;

    printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
    uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
    for (int k = 0; k < grid_size; ++k) {
        int8_t * pos = (int8_t *)(the_grid + k);
        for (int i = 0; i < 4; ++i) {
            int l = (kgrid[k] >> 3*i) & 0x7;
            pos[i] = 2*l + 1;
        }
    }
    kgrid_q3xs = the_grid;
    iq3_data[gindex].grid = the_grid;
    kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
    iq3_data[gindex].map = kmap_q3xs;
    for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
    uint32_t aux32;
    uint8_t * aux8 = (uint8_t *)&aux32;
    for (int i = 0; i < grid_size; ++i) {
        aux32 = kgrid_q3xs[i];
        uint16_t index = 0;
        for (int k=0; k<4; ++k) {
            uint16_t q = (aux8[k] - 1)/2;
            index |= (q << 3*k);
        }
        kmap_q3xs[index] = i;
    }
    int8_t pos[4];
    int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
    int num_neighbors = 0, num_not_in_map = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q3xs[i] >= 0) continue;
        ++num_not_in_map;
        for (int k = 0; k < 4; ++k) {
            int l = (i >> 3*k) & 0x7;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
            int d2 = 0;
            for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
        int n = 0; int d2 = dist2[0];
        int nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            ++n;
        }
        num_neighbors += n;
    }
    printf("%s: %d neighbours in total\n", __func__, num_neighbors);
    kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
    iq3_data[gindex].neighbours = kneighbors_q3xs;
    int counter = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q3xs[i] >= 0) continue;
        for (int k = 0; k < 4; ++k) {
            int l = (i >> 3*k) & 0x7;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
            int d2 = 0;
            for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
        kmap_q3xs[i] = -(counter + 1);
        int d2 = dist2[0];
        uint16_t * start = &kneighbors_q3xs[counter++];
        int n = 0, nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            kneighbors_q3xs[counter++] = dist2[2*j+1];
            ++n;
        }
        *start = n;
    }
    free(dist2);
}

void iq3xs_free_impl(int grid_size) {
    GGML_ASSERT(grid_size == 256);
    const int gindex = iq3_data_index(grid_size);
    if (iq3_data[gindex].grid) {
        free(iq3_data[gindex].grid);       iq3_data[gindex].grid = NULL;
        free(iq3_data[gindex].map);        iq3_data[gindex].map = NULL;
        free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
    }
}

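// 4-coordinate analogue of iq2_find_best_neighbour for the D4 codebook.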
static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
        const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
    int num_neighbors = neighbours[0];
    GGML_ASSERT(num_neighbors > 0);
    float best_d2 = FLT_MAX;
    int grid_index = -1;
    for (int j = 1; j <= num_neighbors; ++j) {
        const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
        float d2 = 0;
        for (int i = 0; i < 4; ++i) {
            float q = pg[i];
            float diff = scale*q - xval[i];
            d2 += weight[i]*diff*diff;
        }
        if (d2 < best_d2) {
            best_d2 = d2; grid_index = neighbours[j];
        }
    }
    GGML_ASSERT(grid_index >= 0);
    const int8_t * pg = (const int8_t *)(grid + grid_index);
    for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
    return grid_index;
}

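// iq3_xxs analogue of quantize_row_iq2_xxs_impl: groups of 4 values are snapped to the D4
// codebook (8 byte-sized grid indices per 32-value block), and the signs plus the 4-bit
// block scale share one 32-bit word per block. Unlike the iq2 variants, quant_weights may
// be NULL, in which case x[i]^2 is used as the importance weight.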
static void quantize_row_iq3_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {

    const int gindex = iq3_data_index(256);

    const uint32_t * kgrid_q3xs      = iq3_data[gindex].grid;
    const int      * kmap_q3xs       = iq3_data[gindex].map;
    const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;

    //GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kgrid_q3xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kmap_q3xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);

    const int kMaxQ = 8;
    const int nbl = n/QK_K;
    block_iq3_xxs * y = vy;

    float scales[QK_K/32];
    float weight[32];
    float xval[32];
    int8_t L[32];
    int8_t Laux[32];
    float  waux[32];
    bool   is_on_grid[8];
    bool   is_on_grid_aux[8];
    uint8_t block_signs[8];
    uint8_t q3[3*(QK_K/8)];
    uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);

    for (int ibl = 0; ibl < nbl; ++ibl) {

        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        memset(q3, 0, 3*QK_K/8);

        float max_scale = 0;

        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = sumx2/QK_K;

        for (int ib = 0; ib < QK_K/32; ++ib) {
            const float * xb = xbl + 32*ib;
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*ibl + 32*ib;
                for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            } else {
                for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
            }
            for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 4; ++k) {
                int nflip = 0;
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
                    }
                }
                if (nflip%2) {
                    int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
                    for (int i = 1; i < 8; ++i) {
                        float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
                        if (ax < min) {
                            min = ax; imin = i;
                        }
                    }
                    xval[8*k+imin] = -xval[8*k+imin];
                    s ^= (1 << imin);
                }
                block_signs[k] = s & 127;
            }
            float max = xval[0];
            for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                memset(L, 0, 32);
                continue;
            }
            float best = 0;
            float scale = max/(2*kMaxQ-1);
            for (int is = -15; is <= 15; ++is) {
                float id = (2*kMaxQ-1+is*0.2f)/max;
                float this_scale = 1/id;
                for (int k = 0; k < 8; ++k) {
                    for (int i = 0; i < 4; ++i) {
                        int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
                        Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
                    int grid_index = kmap_q3xs[u];
                    is_on_grid_aux[k] = true;
                    if (grid_index < 0) {
                        is_on_grid_aux[k] = false;
                        const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
                        grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    for (int i = 0; i < 32; ++i) L[i] = Laux[i];
                    for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
                }
            }
            int n_not_ongrid = 0;
            for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
            if (n_not_ongrid > 0 && scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 8; ++k) {
                    if (is_on_grid[k]) continue;
                    uint16_t u = 0;
                    for (int i = 0; i < 4; ++i) {
                        int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 3*i);
                    }
                    int grid_index = kmap_q3xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
                        grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
                    }
                    const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
                    for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
                // and correspondingly flip quant signs.
                scale = -scale;
                for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
            }
            for (int k = 0; k < 8; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
                int grid_index = kmap_q3xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                q3[8*ib+k] = grid_index;
            }
            scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }

        if (!max_scale) {
            memset(y[ibl].qs, 0, 3*QK_K/8);
            continue;
        }

        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d);
        float id = 1/d;
        float sumqx = 0, sumq2 = 0;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            scales_and_signs[ib] |= ((uint32_t)l << 28);
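            // Disabled experimental refinement pass: exhaustively re-search the codebook for
            // every group of 4 at the final block scale, then re-fit d to the result.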
            if (false) {
                const float * xb = xbl + 32*ib;
                if (quant_weights) {
                    const float * qw = quant_weights + QK_K*ibl + 32*ib;
                    for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
                } else {
                    for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
                }
                const float db = 0.25f * d * (1 + 2*l);
                for (int k = 0; k < 8; ++k) {
                    const int8_t * signs = (const int8_t *)keven_signs_q2xs + 8*((scales_and_signs[ib] >> 7*(k/2)) & 127) + 4*(k%2);
                    const float * xk = xb + 4*k;
                    const float * wk = weight + 4*k;
                    //const uint8_t * grid = (const uint8_t *)(kgrid_q3xs + q3[8*ib+k]);
                    const uint8_t * grid = (const uint8_t *)(iq3xxs_grid + q3[8*ib+k]);
                    float best_mse = 0; int best_index = q3[8*ib+k];
                    for (int j = 0; j < 4; ++j) {
                        float diff = db * grid[j] * signs[j] - xk[j];
                        best_mse += wk[j] * diff * diff;
                    }
                    for (int idx = 0; idx < 256; ++idx) {
                        //grid = (const uint8_t *)(kgrid_q3xs + idx);
                        grid = (const uint8_t *)(iq3xxs_grid + idx);
                        float mse = 0;
                        for (int j = 0; j < 4; ++j) {
                            float diff = db * grid[j] * signs[j] - xk[j];
                            mse += wk[j] * diff * diff;
                        }
                        if (mse < best_mse) {
                            best_mse = mse; best_index = idx;
                        }
                    }
                    q3[8*ib+k] = best_index;
                    //grid = (const uint8_t *)(kgrid_q3xs + best_index);
                    grid = (const uint8_t *)(iq3xxs_grid + best_index);
                    for (int j = 0; j < 4; ++j) {
                        float q = db * grid[j] * signs[j];
                        sumqx += wk[j] * q * xk[j];
                        sumq2 += wk[j] * q * q;
                    }
                }
                if (sumq2 > 0) y[ibl].d = GGML_FP32_TO_FP16(d*sumqx/sumq2);
            }
        }
        memcpy(y[ibl].qs, q3, 3*QK_K/8);
    }
}

size_t quantize_iq3_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq3_xxs_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq3_xxs);
    }
    return nrow * nblock * sizeof(block_iq3_xxs);
}

void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_iq3_xxs * restrict y = vy;
    quantize_row_iq3_xxs_reference(x, y, k);
}

void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int k) {
    assert(k % QK_K == 0);
    quantize_row_iq3_xxs_impl(x, y, k, NULL);
}