ggml-quants.c
#include "ggml-quants.h"
#include "ggml-impl.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}
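
// Illustrative note (not part of the original source): _mm_maddubs_epi16 multiplies an unsigned
// operand by a signed one, so the signed*signed product x*y is rewritten as |x| * (y carrying the
// sign of x), which is what the _mm_sign_epi8 calls above prepare. Each 32-bit lane of the result
// therefore holds the sum of 4 adjacent int8 products; a scalar sketch of lane i, assuming
// hypothetical int8 arrays x[] and y[] of length 16:
//
//     int32_t lane = 0;
//     for (int j = 0; j < 4; ++j) {
//         lane += (int32_t)x[4*i + j] * (int32_t)y[4*i + j];
//     }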
#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}
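
// Illustrative note (not part of the original source): the shuffle broadcasts source byte j of x32
// into output bytes 8*j..8*j+7. OR-ing with bit_mask (bytes 0xfe, 0xfd, 0xfb, ..., 0x7f) sets every
// bit except the one under test, so output byte 8*j+k becomes 0xFF exactly when bit k of source
// byte j was set, and the compare against -1 yields the { 0x00, 0xFF } mask. For example, with
// x32 = 0x00000005 the first eight output bytes are FF 00 FF 00 00 00 00 00 (bits 0 and 2 set).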
// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}

static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4);  // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);              // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                        // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
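
// Illustrative note (not part of the original source): a scalar equivalent of one packNibbles lane,
// assuming the 16-bit input already holds two 4-bit values as 0000_abcd_0000_efgh:
//
//     static inline uint8_t pack_nibbles_lane(uint16_t lane) {
//         return (uint8_t)((lane & 0x0F) | ((lane >> 4) & 0xF0)); // -> abcd_efgh
//     }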
#elif defined(__AVX__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
    const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
    __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
    __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
    const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytesl = _mm_or_si128(bytesl, bit_mask);
    bytesh = _mm_or_si128(bytesh, bit_mask);
    bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
    bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
    return MM256_SET_M128I(bytesh, bytesl);
}

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    // Load 16 bytes from memory
    __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
    __m128i tmph = _mm_srli_epi16(tmpl, 4);
    const __m128i lowMask = _mm_set1_epi8(0xF);
    tmpl = _mm_and_si128(lowMask, tmpl);
    tmph = _mm_and_si128(lowMask, tmph);
    return MM256_SET_M128I(tmph, tmpl);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
    const __m128i ones = _mm_set1_epi16(1);
    const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
    const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
    const __m128i axl = _mm256_castsi256_si128(ax);
    const __m128i axh = _mm256_extractf128_si256(ax, 1);
    const __m128i syl = _mm256_castsi256_si128(sy);
    const __m128i syh = _mm256_extractf128_si256(sy, 1);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
    const __m128i xl = _mm256_castsi256_si128(x);
    const __m128i xh = _mm256_extractf128_si256(x, 1);
    const __m128i yl = _mm256_castsi256_si128(y);
    const __m128i yh = _mm256_extractf128_si256(y, 1);
    // Get absolute values of x vectors
    const __m128i axl = _mm_sign_epi8(xl, xl);
    const __m128i axh = _mm_sign_epi8(xh, xh);
    // Sign the values of the y vectors
    const __m128i syl = _mm_sign_epi8(yl, xl);
    const __m128i syh = _mm_sign_epi8(yh, xh);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m128i lowByte = _mm_set1_epi16( 0xFF );
    __m128i high = _mm_andnot_si128( lowByte, bytes1 );
    __m128i low = _mm_and_si128( lowByte, bytes1 );
    high = _mm_srli_epi16( high, 4 );
    bytes1 = _mm_or_si128( low, high );
    high = _mm_andnot_si128( lowByte, bytes2 );
    low = _mm_and_si128( lowByte, bytes2 );
    high = _mm_srli_epi16( high, 4 );
    bytes2 = _mm_or_si128( low, high );
    return _mm_packus_epi16( bytes1, bytes2);
}
#endif
#elif defined(__SSSE3__)
// horizontally add 4x4 floats
static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
    __m128 res_0 = _mm_hadd_ps(a, b);
    __m128 res_1 = _mm_hadd_ps(c, d);
    __m128 res   = _mm_hadd_ps(res_0, res_1);
    res          = _mm_hadd_ps(res, res);
    res          = _mm_hadd_ps(res, res);
    return _mm_cvtss_f32(res);
}
#endif // __AVX__ || __AVX2__ || __AVX512F__
#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
#if defined(__ARM_NEON)
#if !defined(__aarch64__)

// 64-bit compatibility: fallback implementations of intrinsics that are only available on AArch64

// vaddvq_s16
// vpaddq_s16
// vpaddq_s32
// vaddvq_s32
// vaddvq_f32
// vmaxvq_f32
// vcvtnq_s32_f32
// vzip1_u8
// vzip2_u8

inline static int32_t vaddvq_s16(int16x8_t v) {
    return
        (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
        (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
        (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
        (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
}

inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
    int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
    int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
    return vcombine_s16(a0, b0);
}

inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
    int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
    int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
    return vcombine_s32(a0, b0);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}
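
// Illustrative note (not part of the original source): the real vcvtnq_s32_f32 rounds to nearest
// with ties to even, while roundf() rounds ties away from zero, so this fallback may differ by one
// for values exactly halfway between two integers (e.g. 2.5f -> 3 here, 2 on AArch64).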
inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[0]; res[1] = b[0];
    res[2] = a[1]; res[3] = b[1];
    res[4] = a[2]; res[5] = b[2];
    res[6] = a[3]; res[7] = b[3];

    return res;
}

inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[4]; res[1] = b[4];
    res[2] = a[5]; res[3] = b[5];
    res[4] = a[6]; res[5] = b[6];
    res[6] = a[7]; res[7] = b[7];

    return res;
}

// vld1q_s16_x2
// vld1q_u8_x2
// vld1q_u8_x4
// vld1q_s8_x2
// vld1q_s8_x4
// TODO: double-check these work correctly

typedef struct ggml_int16x8x2_t {
    int16x8_t val[2];
} ggml_int16x8x2_t;

inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
    ggml_int16x8x2_t res;

    res.val[0] = vld1q_s16(ptr + 0);
    res.val[1] = vld1q_s16(ptr + 8);

    return res;
}

typedef struct ggml_uint8x16x2_t {
    uint8x16_t val[2];
} ggml_uint8x16x2_t;

inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
    ggml_uint8x16x2_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);

    return res;
}

typedef struct ggml_uint8x16x4_t {
    uint8x16_t val[4];
} ggml_uint8x16x4_t;

inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
    ggml_uint8x16x4_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);
    res.val[2] = vld1q_u8(ptr + 32);
    res.val[3] = vld1q_u8(ptr + 48);

    return res;
}

typedef struct ggml_int8x16x2_t {
    int8x16_t val[2];
} ggml_int8x16x2_t;

inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
    ggml_int8x16x2_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);

    return res;
}

typedef struct ggml_int8x16x4_t {
    int8x16_t val[4];
} ggml_int8x16x4_t;

inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
    ggml_int8x16x4_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);
    res.val[2] = vld1q_s8(ptr + 32);
    res.val[3] = vld1q_s8(ptr + 48);

    return res;
}

#else

#define ggml_int16x8x2_t  int16x8x2_t
#define ggml_uint8x16x2_t uint8x16x2_t
#define ggml_uint8x16x4_t uint8x16x4_t
#define ggml_int8x16x2_t  int8x16x2_t
#define ggml_int8x16x4_t  int8x16x4_t

#define ggml_vld1q_s16_x2 vld1q_s16_x2
#define ggml_vld1q_u8_x2  vld1q_u8_x2
#define ggml_vld1q_u8_x4  vld1q_u8_x4
#define ggml_vld1q_s8_x2  vld1q_s8_x2
#define ggml_vld1q_s8_x4  vld1q_s8_x4

#endif

#if !defined(__ARM_FEATURE_DOTPROD)

inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));

    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
}

#else

#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)

#endif

#endif

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif
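
// Illustrative note (not part of the original source): for table index i, B8(00, 10) pastes one
// two-digit hex group per bit of i, most significant bit first, so byte j (counted from the least
// significant end) of table_b2b_0[i] is 0x10 when bit j of i is set and 0x00 otherwise;
// table_b2b_1 is the complement. For example:
//
//     table_b2b_0[0x05] == 0x0000000000100010ull
//     table_b2b_1[0x05] == 0x1010101010001000ull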
// reference implementation for deterministic creation of model files
void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}

void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_0_reference(x, y, k);
}
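
// Illustrative usage sketch (not part of the original source): a row of k floats becomes k/QK4_0
// blocks, so the caller sizes the destination accordingly; each block_q4_0 carries one fp16 scale d
// and 16 bytes of packed 4-bit values, and a stored value q in [0, 15] dequantizes to (q - 8)*d.
// Assuming a hypothetical float buffer src of length k:
//
//     const int k  = 4096;                        // must be a multiple of QK4_0 (= 32)
//     const int nb = k / QK4_0;                   // number of blocks written
//     block_q4_0 * dst = malloc(nb*sizeof(block_q4_0));
//     quantize_row_q4_0_reference(src, dst, k);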
  427. void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
  428. const int qk = QK4_1;
  429. assert(k % qk == 0);
  430. const int nb = k / qk;
  431. for (int i = 0; i < nb; i++) {
  432. float min = FLT_MAX;
  433. float max = -FLT_MAX;
  434. for (int j = 0; j < qk; j++) {
  435. const float v = x[i*qk + j];
  436. if (v < min) min = v;
  437. if (v > max) max = v;
  438. }
  439. const float d = (max - min) / ((1 << 4) - 1);
  440. const float id = d ? 1.0f/d : 0.0f;
  441. y[i].d = GGML_FP32_TO_FP16(d);
  442. y[i].m = GGML_FP32_TO_FP16(min);
  443. for (int j = 0; j < qk/2; ++j) {
  444. const float x0 = (x[i*qk + 0 + j] - min)*id;
  445. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  446. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
  447. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
  448. y[i].qs[j] = xi0;
  449. y[i].qs[j] |= xi1 << 4;
  450. }
  451. }
  452. }
  453. void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
  454. quantize_row_q4_1_reference(x, y, k);
  455. }
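// Q4_1 differs from Q4_0 by using an affine mapping: each block stores a scale
// d = (max - min)/15 and an offset m = min (both fp16), and a value is reconstructed as
// x ~= d * q + m with q in [0, 15]. This costs two extra bytes per block but represents
// blocks whose values are not centered on zero more accurately.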
  456. void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
  457. static const int qk = QK5_0;
  458. assert(k % qk == 0);
  459. const int nb = k / qk;
  460. for (int i = 0; i < nb; i++) {
  461. float amax = 0.0f; // absolute max
  462. float max = 0.0f;
  463. for (int j = 0; j < qk; j++) {
  464. const float v = x[i*qk + j];
  465. if (amax < fabsf(v)) {
  466. amax = fabsf(v);
  467. max = v;
  468. }
  469. }
  470. const float d = max / -16;
  471. const float id = d ? 1.0f/d : 0.0f;
  472. y[i].d = GGML_FP32_TO_FP16(d);
  473. uint32_t qh = 0;
  474. for (int j = 0; j < qk/2; ++j) {
  475. const float x0 = x[i*qk + 0 + j]*id;
  476. const float x1 = x[i*qk + qk/2 + j]*id;
  477. const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
  478. const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
  479. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
480. // extract the 5th bit and store it in qh: xi0's bit at position j, xi1's at position j + qk/2
  481. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  482. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  483. }
  484. memcpy(&y[i].qh, &qh, sizeof(qh));
  485. }
  486. }
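// Q5_0 stores 5-bit quants: the low 4 bits of each value go into the packed qs nibbles,
// and the 5th bits are collected into the 32-bit qh field, the first half of the block in
// bits 0..15 and the second half in bits 16..31. Dequantization reassembles
// x ~= d * (((qs nibble) | (qh bit << 4)) - 16), mirroring dequantize_row_q5_0 below.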
  487. void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
  488. quantize_row_q5_0_reference(x, y, k);
  489. }
  490. void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
  491. const int qk = QK5_1;
  492. assert(k % qk == 0);
  493. const int nb = k / qk;
  494. for (int i = 0; i < nb; i++) {
  495. float min = FLT_MAX;
  496. float max = -FLT_MAX;
  497. for (int j = 0; j < qk; j++) {
  498. const float v = x[i*qk + j];
  499. if (v < min) min = v;
  500. if (v > max) max = v;
  501. }
  502. const float d = (max - min) / ((1 << 5) - 1);
  503. const float id = d ? 1.0f/d : 0.0f;
  504. y[i].d = GGML_FP32_TO_FP16(d);
  505. y[i].m = GGML_FP32_TO_FP16(min);
  506. uint32_t qh = 0;
  507. for (int j = 0; j < qk/2; ++j) {
  508. const float x0 = (x[i*qk + 0 + j] - min)*id;
  509. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  510. const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
  511. const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
  512. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
513. // extract the 5th bit and store it in qh: xi0's bit at position j, xi1's at position j + qk/2
  514. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  515. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  516. }
  517. memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
  518. }
  519. }
  520. void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
  521. quantize_row_q5_1_reference(x, y, k);
  522. }
  523. // reference implementation for deterministic creation of model files
  524. void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
  525. assert(k % QK8_0 == 0);
  526. const int nb = k / QK8_0;
  527. for (int i = 0; i < nb; i++) {
  528. float amax = 0.0f; // absolute max
  529. for (int j = 0; j < QK8_0; j++) {
  530. const float v = x[i*QK8_0 + j];
  531. amax = MAX(amax, fabsf(v));
  532. }
  533. const float d = amax / ((1 << 7) - 1);
  534. const float id = d ? 1.0f/d : 0.0f;
  535. y[i].d = GGML_FP32_TO_FP16(d);
  536. for (int j = 0; j < QK8_0; ++j) {
  537. const float x0 = x[i*QK8_0 + j]*id;
  538. y[i].qs[j] = roundf(x0);
  539. }
  540. }
  541. }
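// Q8_0 keeps full 8-bit quants: d = amax / 127 and q = round(x / d), so dequantization is
// simply x ~= d * q. The SIMD implementations below compute the same block scale and then
// quantize 32 values per block with vector conversions. A round-trip sketch (illustrative
// only, assuming the prototypes from ggml-quants.h are in scope):
//
//     float src[QK8_0], dst[QK8_0];
//     block_q8_0 blk;
//     quantize_row_q8_0_reference(src, &blk, QK8_0);
//     dequantize_row_q8_0(&blk, dst, QK8_0); // dst[j] ~= src[j] at 8-bit precision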
  542. void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
  543. assert(QK8_0 == 32);
  544. assert(k % QK8_0 == 0);
  545. const int nb = k / QK8_0;
  546. block_q8_0 * restrict y = vy;
  547. #if defined(__ARM_NEON)
  548. for (int i = 0; i < nb; i++) {
  549. float32x4_t srcv [8];
  550. float32x4_t asrcv[8];
  551. float32x4_t amaxv[8];
  552. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  553. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  554. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  555. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  556. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  557. const float amax = vmaxvq_f32(amaxv[0]);
  558. const float d = amax / ((1 << 7) - 1);
  559. const float id = d ? 1.0f/d : 0.0f;
  560. y[i].d = GGML_FP32_TO_FP16(d);
  561. for (int j = 0; j < 8; j++) {
  562. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  563. const int32x4_t vi = vcvtnq_s32_f32(v);
  564. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  565. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  566. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  567. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  568. }
  569. }
  570. #elif defined(__wasm_simd128__)
  571. for (int i = 0; i < nb; i++) {
  572. v128_t srcv [8];
  573. v128_t asrcv[8];
  574. v128_t amaxv[8];
  575. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  576. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  577. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  578. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  579. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  580. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  581. wasm_f32x4_extract_lane(amaxv[0], 1)),
  582. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  583. wasm_f32x4_extract_lane(amaxv[0], 3)));
  584. const float d = amax / ((1 << 7) - 1);
  585. const float id = d ? 1.0f/d : 0.0f;
  586. y[i].d = GGML_FP32_TO_FP16(d);
  587. for (int j = 0; j < 8; j++) {
  588. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  589. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  590. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  591. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  592. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  593. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  594. }
  595. }
  596. #elif defined(__AVX2__) || defined(__AVX__)
  597. for (int i = 0; i < nb; i++) {
  598. // Load elements into 4 AVX vectors
  599. __m256 v0 = _mm256_loadu_ps( x );
  600. __m256 v1 = _mm256_loadu_ps( x + 8 );
  601. __m256 v2 = _mm256_loadu_ps( x + 16 );
  602. __m256 v3 = _mm256_loadu_ps( x + 24 );
  603. x += 32;
  604. // Compute max(abs(e)) for the block
  605. const __m256 signBit = _mm256_set1_ps( -0.0f );
  606. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  607. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  608. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  609. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  610. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  611. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  612. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  613. const float maxScalar = _mm_cvtss_f32( max4 );
  614. // Quantize these floats
  615. const float d = maxScalar / 127.f;
  616. y[i].d = GGML_FP32_TO_FP16(d);
  617. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  618. const __m256 mul = _mm256_set1_ps( id );
  619. // Apply the multiplier
  620. v0 = _mm256_mul_ps( v0, mul );
  621. v1 = _mm256_mul_ps( v1, mul );
  622. v2 = _mm256_mul_ps( v2, mul );
  623. v3 = _mm256_mul_ps( v3, mul );
  624. // Round to nearest integer
  625. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  626. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  627. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  628. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  629. // Convert floats to integers
  630. __m256i i0 = _mm256_cvtps_epi32( v0 );
  631. __m256i i1 = _mm256_cvtps_epi32( v1 );
  632. __m256i i2 = _mm256_cvtps_epi32( v2 );
  633. __m256i i3 = _mm256_cvtps_epi32( v3 );
  634. #if defined(__AVX2__)
  635. // Convert int32 to int16
  636. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  637. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  638. // Convert int16 to int8
  639. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  640. // We got our precious signed bytes, but the order is now wrong
  641. // These AVX2 pack instructions process 16-byte pieces independently
642. // The following permute fixes the order
  643. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  644. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  645. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  646. #else
647. // Since AVX lacks some of the necessary integer instructions,
648. // we split the 256-bit registers in half and use their SSE counterparts
  649. __m128i ni0 = _mm256_castsi256_si128( i0 );
  650. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  651. __m128i ni2 = _mm256_castsi256_si128( i1 );
  652. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  653. __m128i ni4 = _mm256_castsi256_si128( i2 );
  654. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  655. __m128i ni6 = _mm256_castsi256_si128( i3 );
  656. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  657. // Convert int32 to int16
  658. ni0 = _mm_packs_epi32( ni0, ni1 );
  659. ni2 = _mm_packs_epi32( ni2, ni3 );
  660. ni4 = _mm_packs_epi32( ni4, ni5 );
  661. ni6 = _mm_packs_epi32( ni6, ni7 );
  662. // Convert int16 to int8
  663. ni0 = _mm_packs_epi16( ni0, ni2 );
  664. ni4 = _mm_packs_epi16( ni4, ni6 );
  665. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  666. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  667. #endif
  668. }
  669. #elif defined(__riscv_v_intrinsic)
  670. size_t vl = __riscv_vsetvl_e32m4(QK8_0);
  671. for (int i = 0; i < nb; i++) {
  672. // load elements
  673. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
  674. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  675. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
  676. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  677. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  678. const float d = amax / ((1 << 7) - 1);
  679. const float id = d ? 1.0f/d : 0.0f;
  680. y[i].d = GGML_FP32_TO_FP16(d);
  681. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  682. // convert to integer
  683. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  684. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  685. // store result
  686. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  687. }
  688. #else
  689. GGML_UNUSED(nb);
  690. // scalar
  691. quantize_row_q8_0_reference(x, y, k);
  692. #endif
  693. }
  694. // reference implementation for deterministic creation of model files
  695. void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
  696. assert(QK8_1 == 32);
  697. assert(k % QK8_1 == 0);
  698. const int nb = k / QK8_1;
  699. for (int i = 0; i < nb; i++) {
  700. float amax = 0.0f; // absolute max
  701. for (int j = 0; j < QK8_1; j++) {
  702. const float v = x[i*QK8_1 + j];
  703. amax = MAX(amax, fabsf(v));
  704. }
  705. const float d = amax / ((1 << 7) - 1);
  706. const float id = d ? 1.0f/d : 0.0f;
  707. y[i].d = d;
  708. int sum = 0;
  709. for (int j = 0; j < QK8_1/2; ++j) {
  710. const float v0 = x[i*QK8_1 + j]*id;
  711. const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
  712. y[i].qs[ j] = roundf(v0);
  713. y[i].qs[QK8_1/2 + j] = roundf(v1);
  714. sum += y[i].qs[ j];
  715. sum += y[i].qs[QK8_1/2 + j];
  716. }
  717. y[i].s = sum*d;
  718. }
  719. }
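// Q8_1 extends Q8_0 with a per-block field s = d * sum(q) (and here stores d as a plain
// float). Having this sum precomputed lets the Q4_1/Q5_1 dot products fold in their
// per-block offset m with a single multiply against s instead of touching individual quants.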
  720. void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
  721. assert(k % QK8_1 == 0);
  722. const int nb = k / QK8_1;
  723. block_q8_1 * restrict y = vy;
  724. #if defined(__ARM_NEON)
  725. for (int i = 0; i < nb; i++) {
  726. float32x4_t srcv [8];
  727. float32x4_t asrcv[8];
  728. float32x4_t amaxv[8];
  729. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  730. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  731. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  732. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  733. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  734. const float amax = vmaxvq_f32(amaxv[0]);
  735. const float d = amax / ((1 << 7) - 1);
  736. const float id = d ? 1.0f/d : 0.0f;
  737. y[i].d = d;
  738. int32x4_t accv = vdupq_n_s32(0);
  739. for (int j = 0; j < 8; j++) {
  740. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  741. const int32x4_t vi = vcvtnq_s32_f32(v);
  742. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  743. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  744. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  745. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  746. accv = vaddq_s32(accv, vi);
  747. }
  748. y[i].s = d * vaddvq_s32(accv);
  749. }
  750. #elif defined(__wasm_simd128__)
  751. for (int i = 0; i < nb; i++) {
  752. v128_t srcv [8];
  753. v128_t asrcv[8];
  754. v128_t amaxv[8];
  755. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  756. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  757. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  758. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  759. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  760. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  761. wasm_f32x4_extract_lane(amaxv[0], 1)),
  762. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  763. wasm_f32x4_extract_lane(amaxv[0], 3)));
  764. const float d = amax / ((1 << 7) - 1);
  765. const float id = d ? 1.0f/d : 0.0f;
  766. y[i].d = d;
  767. v128_t accv = wasm_i32x4_splat(0);
  768. for (int j = 0; j < 8; j++) {
  769. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  770. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  771. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  772. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  773. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  774. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  775. accv = wasm_i32x4_add(accv, vi);
  776. }
  777. y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
  778. wasm_i32x4_extract_lane(accv, 1) +
  779. wasm_i32x4_extract_lane(accv, 2) +
  780. wasm_i32x4_extract_lane(accv, 3));
  781. }
  782. #elif defined(__AVX2__) || defined(__AVX__)
  783. for (int i = 0; i < nb; i++) {
  784. // Load elements into 4 AVX vectors
  785. __m256 v0 = _mm256_loadu_ps( x );
  786. __m256 v1 = _mm256_loadu_ps( x + 8 );
  787. __m256 v2 = _mm256_loadu_ps( x + 16 );
  788. __m256 v3 = _mm256_loadu_ps( x + 24 );
  789. x += 32;
  790. // Compute max(abs(e)) for the block
  791. const __m256 signBit = _mm256_set1_ps( -0.0f );
  792. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  793. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  794. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  795. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  796. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  797. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  798. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  799. const float maxScalar = _mm_cvtss_f32( max4 );
  800. // Quantize these floats
  801. const float d = maxScalar / 127.f;
  802. y[i].d = d;
  803. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  804. const __m256 mul = _mm256_set1_ps( id );
  805. // Apply the multiplier
  806. v0 = _mm256_mul_ps( v0, mul );
  807. v1 = _mm256_mul_ps( v1, mul );
  808. v2 = _mm256_mul_ps( v2, mul );
  809. v3 = _mm256_mul_ps( v3, mul );
  810. // Round to nearest integer
  811. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  812. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  813. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  814. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  815. // Convert floats to integers
  816. __m256i i0 = _mm256_cvtps_epi32( v0 );
  817. __m256i i1 = _mm256_cvtps_epi32( v1 );
  818. __m256i i2 = _mm256_cvtps_epi32( v2 );
  819. __m256i i3 = _mm256_cvtps_epi32( v3 );
  820. #if defined(__AVX2__)
  821. // Compute the sum of the quants and set y[i].s
  822. y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
  823. // Convert int32 to int16
  824. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  825. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  826. // Convert int16 to int8
  827. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  828. // We got our precious signed bytes, but the order is now wrong
  829. // These AVX2 pack instructions process 16-byte pieces independently
830. // The following permute fixes the order
  831. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  832. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  833. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  834. #else
835. // Since AVX lacks some of the necessary integer instructions,
836. // we split the 256-bit registers in half and use their SSE counterparts
  837. __m128i ni0 = _mm256_castsi256_si128( i0 );
  838. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  839. __m128i ni2 = _mm256_castsi256_si128( i1 );
  840. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  841. __m128i ni4 = _mm256_castsi256_si128( i2 );
  842. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  843. __m128i ni6 = _mm256_castsi256_si128( i3 );
  844. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  845. // Compute the sum of the quants and set y[i].s
  846. const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
  847. const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
  848. y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
  849. // Convert int32 to int16
  850. ni0 = _mm_packs_epi32( ni0, ni1 );
  851. ni2 = _mm_packs_epi32( ni2, ni3 );
  852. ni4 = _mm_packs_epi32( ni4, ni5 );
  853. ni6 = _mm_packs_epi32( ni6, ni7 );
  854. // Convert int16 to int8
  855. ni0 = _mm_packs_epi16( ni0, ni2 );
  856. ni4 = _mm_packs_epi16( ni4, ni6 );
  857. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  858. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  859. #endif
  860. }
  861. #elif defined(__riscv_v_intrinsic)
  862. size_t vl = __riscv_vsetvl_e32m4(QK8_1);
  863. for (int i = 0; i < nb; i++) {
  864. // load elements
  865. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
  866. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  867. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl);
  868. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  869. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  870. const float d = amax / ((1 << 7) - 1);
  871. const float id = d ? 1.0f/d : 0.0f;
  872. y[i].d = d;
  873. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  874. // convert to integer
  875. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  876. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  877. // store result
  878. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  879. // compute sum for y[i].s
  880. vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
  881. vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
  882. // set y[i].s
  883. int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
  884. y[i].s = sum*d;
  885. }
  886. #else
  887. GGML_UNUSED(nb);
  888. // scalar
  889. quantize_row_q8_1_reference(x, y, k);
  890. #endif
  891. }
  892. void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
  893. static const int qk = QK4_0;
  894. assert(k % qk == 0);
  895. const int nb = k / qk;
  896. for (int i = 0; i < nb; i++) {
  897. const float d = GGML_FP16_TO_FP32(x[i].d);
  898. for (int j = 0; j < qk/2; ++j) {
  899. const int x0 = (x[i].qs[j] & 0x0F) - 8;
  900. const int x1 = (x[i].qs[j] >> 4) - 8;
  901. y[i*qk + j + 0 ] = x0*d;
  902. y[i*qk + j + qk/2] = x1*d;
  903. }
  904. }
  905. }
  906. void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
  907. static const int qk = QK4_1;
  908. assert(k % qk == 0);
  909. const int nb = k / qk;
  910. for (int i = 0; i < nb; i++) {
  911. const float d = GGML_FP16_TO_FP32(x[i].d);
  912. const float m = GGML_FP16_TO_FP32(x[i].m);
  913. for (int j = 0; j < qk/2; ++j) {
  914. const int x0 = (x[i].qs[j] & 0x0F);
  915. const int x1 = (x[i].qs[j] >> 4);
  916. y[i*qk + j + 0 ] = x0*d + m;
  917. y[i*qk + j + qk/2] = x1*d + m;
  918. }
  919. }
  920. }
  921. void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
  922. static const int qk = QK5_0;
  923. assert(k % qk == 0);
  924. const int nb = k / qk;
  925. for (int i = 0; i < nb; i++) {
  926. const float d = GGML_FP16_TO_FP32(x[i].d);
  927. uint32_t qh;
  928. memcpy(&qh, x[i].qh, sizeof(qh));
  929. for (int j = 0; j < qk/2; ++j) {
  930. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  931. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  932. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  933. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  934. y[i*qk + j + 0 ] = x0*d;
  935. y[i*qk + j + qk/2] = x1*d;
  936. }
  937. }
  938. }
  939. void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
  940. static const int qk = QK5_1;
  941. assert(k % qk == 0);
  942. const int nb = k / qk;
  943. for (int i = 0; i < nb; i++) {
  944. const float d = GGML_FP16_TO_FP32(x[i].d);
  945. const float m = GGML_FP16_TO_FP32(x[i].m);
  946. uint32_t qh;
  947. memcpy(&qh, x[i].qh, sizeof(qh));
  948. for (int j = 0; j < qk/2; ++j) {
  949. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  950. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  951. const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
  952. const int x1 = (x[i].qs[j] >> 4) | xh_1;
  953. y[i*qk + j + 0 ] = x0*d + m;
  954. y[i*qk + j + qk/2] = x1*d + m;
  955. }
  956. }
  957. }
  958. void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
  959. static const int qk = QK8_0;
  960. assert(k % qk == 0);
  961. const int nb = k / qk;
  962. for (int i = 0; i < nb; i++) {
  963. const float d = GGML_FP16_TO_FP32(x[i].d);
  964. for (int j = 0; j < qk; ++j) {
  965. y[i*qk + j] = x[i].qs[j]*d;
  966. }
  967. }
  968. }
  969. //
  970. // 2-6 bit quantization in super-blocks
  971. //
  972. //
  973. // ===================== Helper functions
  974. //
  975. static inline int nearest_int(float fval) {
  976. assert(fval <= 4194303.f);
  977. float val = fval + 12582912.f;
  978. int i; memcpy(&i, &val, sizeof(int));
  979. return (i & 0x007fffff) - 0x00400000;
  980. }
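// nearest_int() rounds via the float representation: adding 12582912.0f (1.5 * 2^23)
// pushes the value into [2^23, 2^24), where the spacing between floats is exactly 1, so
// the hardware's round-to-nearest places the rounded integer in the low mantissa bits;
// masking with 0x007fffff and subtracting 0x00400000 (2^22) recovers the signed result.
// For example, nearest_int(3.7f) == 4 and nearest_int(-2.5f) == -2 (ties round to even).
// The assert keeps fval small enough for the trick to stay within the mantissa.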
  981. static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
  982. const float * restrict qw) {
  983. float max = 0;
  984. float amax = 0;
  985. for (int i = 0; i < n; ++i) {
  986. float ax = fabsf(x[i]);
  987. if (ax > amax) { amax = ax; max = x[i]; }
  988. }
  989. if (amax < 1e-30f) { // all zero
  990. for (int i = 0; i < n; ++i) {
  991. L[i] = 0;
  992. }
  993. return 0.f;
  994. }
  995. float iscale = -nmax / max;
  996. if (rmse_type == 0) {
  997. for (int i = 0; i < n; ++i) {
  998. int l = nearest_int(iscale * x[i]);
  999. L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
  1000. }
  1001. return 1/iscale;
  1002. }
  1003. bool return_early = false;
  1004. if (rmse_type < 0) {
  1005. rmse_type = -rmse_type;
  1006. return_early = true;
  1007. }
  1008. float sumlx = 0;
  1009. float suml2 = 0;
  1010. #ifdef HAVE_BUGGY_APPLE_LINKER
  1011. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1012. for (volatile int i = 0; i < n; ++i) {
  1013. #else
  1014. for (int i = 0; i < n; ++i) {
  1015. #endif
  1016. int l = nearest_int(iscale * x[i]);
  1017. l = MAX(-nmax, MIN(nmax-1, l));
  1018. L[i] = l + nmax;
  1019. float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
  1020. sumlx += w*x[i]*l;
  1021. suml2 += w*l*l;
  1022. }
  1023. float scale = sumlx/suml2;
  1024. if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
  1025. float best = scale * sumlx;
  1026. for (int is = -9; is <= 9; ++is) {
  1027. if (is == 0) {
  1028. continue;
  1029. }
  1030. iscale = -(nmax + 0.1f*is) / max;
  1031. sumlx = suml2 = 0;
  1032. for (int i = 0; i < n; ++i) {
  1033. int l = nearest_int(iscale * x[i]);
  1034. l = MAX(-nmax, MIN(nmax-1, l));
  1035. float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
  1036. sumlx += w*x[i]*l;
  1037. suml2 += w*l*l;
  1038. }
  1039. if (suml2 > 0 && sumlx*sumlx > best*suml2) {
  1040. for (int i = 0; i < n; ++i) {
  1041. int l = nearest_int(iscale * x[i]);
  1042. L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
  1043. }
  1044. scale = sumlx/suml2; best = scale*sumlx;
  1045. }
  1046. }
  1047. return scale;
  1048. }
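// make_qx_quants fits a single scale to a group of n values: minimizing the weighted error
// sum_i w_i * (x_i - s*l_i)^2 over s for fixed quants l gives s = (sum w*x*l) / (sum w*l*l),
// which is the sumlx/suml2 expression above. The loop over is = -9..9 retries the rounding
// with slightly perturbed initial scales and keeps the candidate that maximizes
// sumlx^2/suml2, i.e. the smallest weighted squared error. Quants live in [-nmax, nmax-1]
// and are stored with a +nmax offset; the weights are qw[i] when provided, otherwise
// chosen by rmse_type (x^2, 1, |x| or sqrt(|x|)).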
  1049. static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
  1050. float max = 0;
  1051. float amax = 0;
  1052. for (int i = 0; i < n; ++i) {
  1053. float ax = fabsf(x[i]);
  1054. if (ax > amax) { amax = ax; max = x[i]; }
  1055. }
  1056. if (!amax) { // all zero
  1057. for (int i = 0; i < n; ++i) { L[i] = 0; }
  1058. return 0.f;
  1059. }
  1060. float iscale = -nmax / max;
  1061. if (do_rmse) {
  1062. float sumlx = 0;
  1063. float suml2 = 0;
  1064. for (int i = 0; i < n; ++i) {
  1065. int l = nearest_int(iscale * x[i]);
  1066. l = MAX(-nmax, MIN(nmax-1, l));
  1067. L[i] = l;
  1068. float w = x[i]*x[i];
  1069. sumlx += w*x[i]*l;
  1070. suml2 += w*l*l;
  1071. }
  1072. for (int itry = 0; itry < 5; ++itry) {
  1073. int n_changed = 0;
  1074. for (int i = 0; i < n; ++i) {
  1075. float w = x[i]*x[i];
  1076. float slx = sumlx - w*x[i]*L[i];
  1077. if (slx > 0) {
  1078. float sl2 = suml2 - w*L[i]*L[i];
  1079. int new_l = nearest_int(x[i] * sl2 / slx);
  1080. new_l = MAX(-nmax, MIN(nmax-1, new_l));
  1081. if (new_l != L[i]) {
  1082. slx += w*x[i]*new_l;
  1083. sl2 += w*new_l*new_l;
  1084. if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
  1085. L[i] = new_l; sumlx = slx; suml2 = sl2;
  1086. ++n_changed;
  1087. }
  1088. }
  1089. }
  1090. }
  1091. if (!n_changed) {
  1092. break;
  1093. }
  1094. }
  1095. for (int i = 0; i < n; ++i) {
  1096. L[i] += nmax;
  1097. }
  1098. return sumlx / suml2;
  1099. }
  1100. for (int i = 0; i < n; ++i) {
  1101. int l = nearest_int(iscale * x[i]);
  1102. l = MAX(-nmax, MIN(nmax-1, l));
  1103. L[i] = l + nmax;
  1104. }
  1105. return 1/iscale;
  1106. }
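// make_q3_quants is the symmetric (offset-free) variant used for the 3-bit groups: quants
// live in [-nmax, nmax-1] and are stored shifted by +nmax. With do_rmse the initial
// rounding is refined greedily, re-deriving one quant at a time from the weighted
// least-squares condition and accepting the change only if it improves the overall fit.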
  1107. static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
  1108. int ntry, float alpha) {
  1109. float min = x[0];
  1110. float max = x[0];
  1111. for (int i = 1; i < n; ++i) {
  1112. if (x[i] < min) min = x[i];
  1113. if (x[i] > max) max = x[i];
  1114. }
  1115. if (max == min) {
  1116. for (int i = 0; i < n; ++i) L[i] = 0;
  1117. *the_min = 0;
  1118. return 0.f;
  1119. }
  1120. if (min > 0) min = 0;
  1121. float iscale = nmax/(max - min);
  1122. float scale = 1/iscale;
  1123. for (int itry = 0; itry < ntry; ++itry) {
  1124. float sumlx = 0; int suml2 = 0;
  1125. bool did_change = false;
  1126. for (int i = 0; i < n; ++i) {
  1127. int l = nearest_int(iscale*(x[i] - min));
  1128. l = MAX(0, MIN(nmax, l));
  1129. if (l != L[i]) {
  1130. L[i] = l;
  1131. did_change = true;
  1132. }
  1133. sumlx += (x[i] - min)*l;
  1134. suml2 += l*l;
  1135. }
  1136. scale = sumlx/suml2;
  1137. float sum = 0;
  1138. for (int i = 0; i < n; ++i) {
  1139. sum += x[i] - scale*L[i];
  1140. }
  1141. min = alpha*min + (1 - alpha)*sum/n;
  1142. if (min > 0) min = 0;
  1143. iscale = 1/scale;
  1144. if (!did_change) break;
  1145. }
  1146. *the_min = -min;
  1147. return scale;
  1148. }
  1149. static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
  1150. uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
  1151. float rmin, float rdelta, int nstep, bool use_mad) {
  1152. float min = x[0];
  1153. float max = x[0];
  1154. float sum_w = weights[0];
  1155. float sum_x = sum_w * x[0];
  1156. #ifdef HAVE_BUGGY_APPLE_LINKER
  1157. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1158. for (volatile int i = 1; i < n; ++i) {
  1159. #else
  1160. for (int i = 1; i < n; ++i) {
  1161. #endif
  1162. if (x[i] < min) min = x[i];
  1163. if (x[i] > max) max = x[i];
  1164. float w = weights[i];
  1165. sum_w += w;
  1166. sum_x += w * x[i];
  1167. }
  1168. if (min > 0) min = 0;
  1169. if (max == min) {
  1170. for (int i = 0; i < n; ++i) L[i] = 0;
  1171. *the_min = -min;
  1172. return 0.f;
  1173. }
  1174. float iscale = nmax/(max - min);
  1175. float scale = 1/iscale;
  1176. float best_mad = 0;
  1177. for (int i = 0; i < n; ++i) {
  1178. int l = nearest_int(iscale*(x[i] - min));
  1179. L[i] = MAX(0, MIN(nmax, l));
  1180. float diff = scale * L[i] + min - x[i];
  1181. diff = use_mad ? fabsf(diff) : diff * diff;
  1182. float w = weights[i];
  1183. best_mad += w * diff;
  1184. }
  1185. if (nstep < 1) {
  1186. *the_min = -min;
  1187. return scale;
  1188. }
  1189. for (int is = 0; is <= nstep; ++is) {
  1190. iscale = (rmin + rdelta*is + nmax)/(max - min);
  1191. float sum_l = 0, sum_l2 = 0, sum_xl = 0;
  1192. for (int i = 0; i < n; ++i) {
  1193. int l = nearest_int(iscale*(x[i] - min));
  1194. l = MAX(0, MIN(nmax, l));
  1195. Laux[i] = l;
  1196. float w = weights[i];
  1197. sum_l += w*l;
  1198. sum_l2 += w*l*l;
  1199. sum_xl += w*l*x[i];
  1200. }
  1201. float D = sum_w * sum_l2 - sum_l * sum_l;
  1202. if (D > 0) {
  1203. float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
  1204. float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
  1205. if (this_min > 0) {
  1206. this_min = 0;
  1207. this_scale = sum_xl / sum_l2;
  1208. }
  1209. float mad = 0;
  1210. for (int i = 0; i < n; ++i) {
  1211. float diff = this_scale * Laux[i] + this_min - x[i];
  1212. diff = use_mad ? fabsf(diff) : diff * diff;
  1213. float w = weights[i];
  1214. mad += w * diff;
  1215. }
  1216. if (mad < best_mad) {
  1217. for (int i = 0; i < n; ++i) {
  1218. L[i] = Laux[i];
  1219. }
  1220. best_mad = mad;
  1221. scale = this_scale;
  1222. min = this_min;
  1223. }
  1224. }
  1225. }
  1226. *the_min = -min;
  1227. return scale;
  1228. }
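// make_qkx2_quants fits x_i ~= scale * L_i + min by weighted least squares. For a fixed set
// of quants L the optimum solves the 2x2 normal equations:
//
//     scale = (sum_w*sum_xl - sum_x*sum_l) / D,   min = (sum_l2*sum_x - sum_l*sum_xl) / D,
//     D     = sum_w*sum_l2 - sum_l*sum_l
//
// which is exactly what the nstep grid search above evaluates for each candidate rounding.
// min is clamped to be non-positive and returned negated, so callers store a non-negative
// "min" that is subtracted back out during dequantization.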
  1229. #if QK_K == 256
  1230. static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
  1231. if (j < 4) {
  1232. *d = q[j] & 63; *m = q[j + 4] & 63;
  1233. } else {
  1234. *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
  1235. *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
  1236. }
  1237. }
  1238. #endif
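// For QK_K == 256 the eight 6-bit scales and eight 6-bit mins of a super-block are packed
// into 12 bytes: for j < 4 they occupy the low 6 bits of bytes j and j+4; for j >= 4 the
// low 4 bits sit in byte j+4 (scale in the low nibble, min in the high nibble) and the top
// 2 bits are stored in bits 6..7 of bytes j-4 (scale) and j (min), which is what
// get_scale_min_k4 unpacks.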
1239. //========================= 2-bit (de)-quantization
  1240. void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
  1241. assert(k % QK_K == 0);
  1242. const int nb = k / QK_K;
  1243. uint8_t L[QK_K];
  1244. uint8_t Laux[16];
  1245. float weights[16];
  1246. float mins[QK_K/16];
  1247. float scales[QK_K/16];
  1248. const float q4scale = 15.f;
  1249. for (int i = 0; i < nb; i++) {
1250. float max_scale = 0; // as we are subtracting the min, scales are always positive
  1251. float max_min = 0;
  1252. for (int j = 0; j < QK_K/16; ++j) {
  1253. for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
  1254. scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
  1255. float scale = scales[j];
  1256. if (scale > max_scale) {
  1257. max_scale = scale;
  1258. }
  1259. float min = mins[j];
  1260. if (min > max_min) {
  1261. max_min = min;
  1262. }
  1263. }
  1264. if (max_scale > 0) {
  1265. float iscale = q4scale/max_scale;
  1266. for (int j = 0; j < QK_K/16; ++j) {
  1267. int l = nearest_int(iscale*scales[j]);
  1268. y[i].scales[j] = l;
  1269. }
  1270. y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
  1271. } else {
  1272. for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
  1273. y[i].d = GGML_FP32_TO_FP16(0.f);
  1274. }
  1275. if (max_min > 0) {
  1276. float iscale = q4scale/max_min;
  1277. for (int j = 0; j < QK_K/16; ++j) {
  1278. int l = nearest_int(iscale*mins[j]);
  1279. y[i].scales[j] |= (l << 4);
  1280. }
  1281. y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
  1282. } else {
  1283. y[i].dmin = GGML_FP32_TO_FP16(0.f);
  1284. }
  1285. for (int j = 0; j < QK_K/16; ++j) {
  1286. const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
  1287. if (!d) continue;
  1288. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
  1289. for (int ii = 0; ii < 16; ++ii) {
  1290. int l = nearest_int((x[16*j + ii] + dm)/d);
  1291. l = MAX(0, MIN(3, l));
  1292. L[16*j + ii] = l;
  1293. }
  1294. }
  1295. #if QK_K == 256
  1296. for (int j = 0; j < QK_K; j += 128) {
  1297. for (int l = 0; l < 32; ++l) {
  1298. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1299. }
  1300. }
  1301. #else
  1302. for (int l = 0; l < 16; ++l) {
  1303. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1304. }
  1305. #endif
  1306. x += QK_K;
  1307. }
  1308. }
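// Q2_K layout: a super-block of QK_K values is split into 16-value sub-blocks; each
// sub-block gets a 4-bit scale and a 4-bit min packed into one byte of y[i].scales, both
// multiplied by the super-block fp16 factors d and dmin. A value is reconstructed as
// x ~= d * (scales[j] & 0xF) * q - dmin * (scales[j] >> 4), with the 2-bit quants q packed
// four to a byte, as dequantize_row_q2_K below shows.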
  1309. void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
  1310. assert(k % QK_K == 0);
  1311. const int nb = k / QK_K;
  1312. for (int i = 0; i < nb; i++) {
  1313. const float d = GGML_FP16_TO_FP32(x[i].d);
  1314. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1315. const uint8_t * q = x[i].qs;
  1316. #if QK_K == 256
  1317. int is = 0;
  1318. float dl, ml;
  1319. for (int n = 0; n < QK_K; n += 128) {
  1320. int shift = 0;
  1321. for (int j = 0; j < 4; ++j) {
  1322. uint8_t sc = x[i].scales[is++];
  1323. dl = d * (sc & 0xF); ml = min * (sc >> 4);
  1324. for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
  1325. sc = x[i].scales[is++];
  1326. dl = d * (sc & 0xF); ml = min * (sc >> 4);
  1327. for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
  1328. shift += 2;
  1329. }
  1330. q += 32;
  1331. }
  1332. #else
  1333. float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
  1334. float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
  1335. float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
  1336. float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
  1337. for (int l = 0; l < 16; ++l) {
  1338. y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
  1339. y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
  1340. y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
  1341. y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
  1342. }
  1343. y += QK_K;
  1344. #endif
  1345. }
  1346. }
  1347. void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
  1348. quantize_row_q2_K_reference(x, vy, k);
  1349. }
  1350. size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  1351. (void)hist; // TODO: collect histograms
  1352. for (int j = 0; j < n; j += k) {
  1353. block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
  1354. quantize_row_q2_K_reference(src + j, y, k);
  1355. }
  1356. return (n/QK_K*sizeof(block_q2_K));
  1357. }
  1358. static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
  1359. uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
  1360. float rmin, float rdelta, int nstep, bool use_mad) {
  1361. float min = x[0];
  1362. float max = x[0];
  1363. float sum_w = weights ? weights[0] : x[0]*x[0];
  1364. float sum_x = sum_w * x[0];
  1365. #ifdef HAVE_BUGGY_APPLE_LINKER
  1366. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1367. for (volatile int i = 1; i < n; ++i) {
  1368. #else
  1369. for (int i = 1; i < n; ++i) {
  1370. #endif
  1371. if (x[i] < min) min = x[i];
  1372. if (x[i] > max) max = x[i];
  1373. float w = weights ? weights[i] : x[i]*x[i];
  1374. sum_w += w;
  1375. sum_x += w * x[i];
  1376. }
  1377. if (min > 0) {
  1378. min = 0;
  1379. }
  1380. if (max <= min) {
  1381. memset(L, 0, n);
  1382. *the_min = -min;
  1383. return 0.f;
  1384. }
  1385. float iscale = nmax/(max - min);
  1386. float scale = 1/iscale;
  1387. float best_mad = 0;
  1388. for (int i = 0; i < n; ++i) {
  1389. int l = nearest_int(iscale*(x[i] - min));
  1390. L[i] = MAX(0, MIN(nmax, l));
  1391. float diff = scale * L[i] + min - x[i];
  1392. diff = use_mad ? fabsf(diff) : diff*diff;
  1393. float w = weights ? weights[i] : x[i]*x[i];
  1394. best_mad += w * diff;
  1395. }
  1396. if (nstep < 1) {
  1397. *the_min = -min;
  1398. return scale;
  1399. }
  1400. for (int is = 0; is <= nstep; ++is) {
  1401. iscale = (rmin + rdelta*is + nmax)/(max - min);
  1402. float sum_l = 0, sum_l2 = 0, sum_xl = 0;
  1403. for (int i = 0; i < n; ++i) {
  1404. int l = nearest_int(iscale*(x[i] - min));
  1405. l = MAX(0, MIN(nmax, l));
  1406. Laux[i] = l;
  1407. float w = weights ? weights[i] : x[i]*x[i];
  1408. sum_l += w*l;
  1409. sum_l2 += w*l*l;
  1410. sum_xl += w*l*x[i];
  1411. }
  1412. float D = sum_w * sum_l2 - sum_l * sum_l;
  1413. if (D > 0) {
  1414. float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
  1415. float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
  1416. if (this_min > 0) {
  1417. this_min = 0;
  1418. this_scale = sum_xl / sum_l2;
  1419. }
  1420. float mad = 0;
  1421. for (int i = 0; i < n; ++i) {
  1422. float diff = this_scale * Laux[i] + this_min - x[i];
  1423. diff = use_mad ? fabsf(diff) : diff*diff;
  1424. float w = weights ? weights[i] : x[i]*x[i];
  1425. mad += w * diff;
  1426. }
  1427. if (mad < best_mad) {
  1428. for (int i = 0; i < n; ++i) {
  1429. L[i] = Laux[i];
  1430. }
  1431. best_mad = mad;
  1432. scale = this_scale;
  1433. min = this_min;
  1434. }
  1435. }
  1436. }
  1437. *the_min = -min;
  1438. return scale;
  1439. }
  1440. static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
  1441. float max = 0;
  1442. for (int i = 0; i < n; ++i) {
  1443. max = MAX(max, x[i]);
  1444. }
  1445. if (!max) { // all zero
  1446. for (int i = 0; i < n; ++i) { L[i] = 0; }
  1447. return 0.f;
  1448. }
  1449. float iscale = nmax / max;
  1450. for (int i = 0; i < n; ++i) {
  1451. L[i] = nearest_int(iscale * x[i]);
  1452. }
  1453. float scale = 1/iscale;
  1454. float best_mse = 0;
  1455. for (int i = 0; i < n; ++i) {
  1456. float diff = x[i] - scale*L[i];
  1457. float w = quant_weights[i];
  1458. best_mse += w*diff*diff;
  1459. }
  1460. for (int is = -4; is <= 4; ++is) {
  1461. if (is == 0) continue;
  1462. float iscale_is = (0.1f*is + nmax)/max;
  1463. float scale_is = 1/iscale_is;
  1464. float mse = 0;
  1465. for (int i = 0; i < n; ++i) {
  1466. int l = nearest_int(iscale_is*x[i]);
  1467. l = MIN(nmax, l);
  1468. float diff = x[i] - scale_is*l;
  1469. float w = quant_weights[i];
  1470. mse += w*diff*diff;
  1471. }
  1472. if (mse < best_mse) {
  1473. best_mse = mse;
  1474. iscale = iscale_is;
  1475. }
  1476. }
  1477. float sumlx = 0;
  1478. float suml2 = 0;
  1479. for (int i = 0; i < n; ++i) {
  1480. int l = nearest_int(iscale * x[i]);
  1481. l = MIN(nmax, l);
  1482. L[i] = l;
  1483. float w = quant_weights[i];
  1484. sumlx += w*x[i]*l;
  1485. suml2 += w*l*l;
  1486. }
  1487. for (int itry = 0; itry < 5; ++itry) {
  1488. int n_changed = 0;
  1489. for (int i = 0; i < n; ++i) {
  1490. float w = quant_weights[i];
  1491. float slx = sumlx - w*x[i]*L[i];
  1492. float sl2 = suml2 - w*L[i]*L[i];
  1493. if (slx > 0 && sl2 > 0) {
  1494. int new_l = nearest_int(x[i] * sl2 / slx);
  1495. new_l = MIN(nmax, new_l);
  1496. if (new_l != L[i]) {
  1497. slx += w*x[i]*new_l;
  1498. sl2 += w*new_l*new_l;
  1499. if (slx*slx*suml2 > sumlx*sumlx*sl2) {
  1500. L[i] = new_l; sumlx = slx; suml2 = sl2;
  1501. ++n_changed;
  1502. }
  1503. }
  1504. }
  1505. }
  1506. if (!n_changed) {
  1507. break;
  1508. }
  1509. }
  1510. return sumlx / suml2;
  1511. }
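// make_qp_quants plays the same role as make_qx_quants but for non-negative inputs (it is
// used to quantize the per-sub-block scales and mins themselves): a small grid search over
// candidate scales minimizes the weighted MSE, followed by a few greedy passes that
// re-round individual entries while doing so still improves the fit.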
  1512. static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
  1513. GGML_ASSERT(quant_weights);
  1514. assert(k % QK_K == 0);
  1515. const int nb = k / QK_K;
  1516. const bool requantize = true;
  1517. uint8_t L[QK_K];
  1518. uint8_t Laux[16];
  1519. float mins[QK_K/16];
  1520. float scales[QK_K/16];
  1521. float sw[QK_K/16];
  1522. float weight[QK_K/16];
  1523. uint8_t Ls[QK_K/16], Lm[QK_K/16];
  1524. for (int i = 0; i < nb; i++) {
  1525. memset(sw, 0, QK_K/16*sizeof(float));
  1526. float sumx2 = 0;
  1527. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1528. float sigma2 = sumx2/QK_K;
  1529. for (int j = 0; j < QK_K/16; ++j) {
  1530. const float * restrict qw = quant_weights + QK_K * i + 16*j;
  1531. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
  1532. for (int l = 0; l < 16; ++l) sw[j] += weight[l];
  1533. scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1534. }
  1535. float dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
  1536. float mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw);
  1537. y[i].d = GGML_FP32_TO_FP16(dm);
  1538. y[i].dmin = GGML_FP32_TO_FP16(mm);
  1539. dm = GGML_FP16_TO_FP32(y[i].d);
  1540. mm = GGML_FP16_TO_FP32(y[i].dmin);
  1541. for (int j = 0; j < QK_K/16; ++j) {
  1542. y[i].scales[j] = Ls[j] | (Lm[j] << 4);
  1543. }
  1544. if (requantize) {
  1545. for (int j = 0; j < QK_K/16; ++j) {
  1546. const float d = dm * (y[i].scales[j] & 0xF);
  1547. if (!d) continue;
  1548. const float m = mm * (y[i].scales[j] >> 4);
  1549. for (int ii = 0; ii < 16; ++ii) {
  1550. int l = nearest_int((x[16*j + ii] + m)/d);
  1551. l = MAX(0, MIN(3, l));
  1552. L[16*j + ii] = l;
  1553. }
  1554. }
  1555. }
  1556. #if QK_K == 256
  1557. for (int j = 0; j < QK_K; j += 128) {
  1558. for (int l = 0; l < 32; ++l) {
  1559. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1560. }
  1561. }
  1562. #else
  1563. for (int l = 0; l < 16; ++l) {
  1564. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1565. }
  1566. #endif
  1567. x += QK_K;
  1568. }
  1569. }
  1570. size_t quantize_q2_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  1571. (void)hist;
  1572. size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
  1573. if (!quant_weights) {
  1574. quantize_row_q2_K_reference(src, dst, nrow*n_per_row);
  1575. }
  1576. else {
  1577. char * qrow = (char *)dst;
  1578. for (int row = 0; row < nrow; ++row) {
  1579. quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
  1580. src += n_per_row;
  1581. qrow += row_size;
  1582. }
  1583. }
  1584. return nrow * row_size;
  1585. }
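// quantize_q2_K (and the analogous quantize_q3_K below) accepts optional per-element
// weights: with quant_weights == NULL it falls back to the plain reference quantization,
// otherwise each row goes through quantize_row_q2_K_impl, which biases the sub-block fits
// toward the more heavily weighted values.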
  1586. //========================= 3-bit (de)-quantization
  1587. void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
  1588. assert(k % QK_K == 0);
  1589. const int nb = k / QK_K;
  1590. int8_t L[QK_K];
  1591. float scales[QK_K / 16];
  1592. for (int i = 0; i < nb; i++) {
  1593. float max_scale = 0;
  1594. float amax = 0;
  1595. for (int j = 0; j < QK_K/16; ++j) {
  1596. scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
  1597. float scale = fabsf(scales[j]);
  1598. if (scale > amax) {
  1599. amax = scale; max_scale = scales[j];
  1600. }
  1601. }
  1602. #if QK_K == 256
  1603. memset(y[i].scales, 0, 12);
  1604. if (max_scale) {
  1605. float iscale = -32.f/max_scale;
  1606. for (int j = 0; j < QK_K/16; ++j) {
  1607. int8_t l = nearest_int(iscale*scales[j]);
  1608. l = MAX(-32, MIN(31, l)) + 32;
  1609. if (j < 8) {
  1610. y[i].scales[j] = l & 0xF;
  1611. } else {
  1612. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1613. }
  1614. l >>= 4;
  1615. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1616. }
  1617. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1618. } else {
  1619. y[i].d = GGML_FP32_TO_FP16(0.f);
  1620. }
  1621. int8_t sc;
  1622. for (int j = 0; j < QK_K/16; ++j) {
  1623. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1624. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1625. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1626. if (!d) {
  1627. continue;
  1628. }
  1629. for (int ii = 0; ii < 16; ++ii) {
  1630. int l = nearest_int(x[16*j + ii]/d);
  1631. l = MAX(-4, MIN(3, l));
  1632. L[16*j + ii] = l + 4;
  1633. }
  1634. }
  1635. #else
  1636. if (max_scale) {
  1637. float iscale = -8.f/max_scale;
  1638. for (int j = 0; j < QK_K/16; j+=2) {
  1639. int l1 = nearest_int(iscale*scales[j]);
  1640. l1 = 8 + MAX(-8, MIN(7, l1));
  1641. int l2 = nearest_int(iscale*scales[j+1]);
  1642. l2 = 8 + MAX(-8, MIN(7, l2));
  1643. y[i].scales[j/2] = l1 | (l2 << 4);
  1644. }
  1645. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1646. } else {
  1647. for (int j = 0; j < QK_K/16; j+=2) {
  1648. y[i].scales[j/2] = 0;
  1649. }
  1650. y[i].d = GGML_FP32_TO_FP16(0.f);
  1651. }
  1652. for (int j = 0; j < QK_K/16; ++j) {
  1653. int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
  1654. float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
  1655. if (!d) {
  1656. continue;
  1657. }
  1658. for (int ii = 0; ii < 16; ++ii) {
  1659. int l = nearest_int(x[16*j + ii]/d);
  1660. l = MAX(-4, MIN(3, l));
  1661. L[16*j + ii] = l + 4;
  1662. }
  1663. }
  1664. #endif
  1665. memset(y[i].hmask, 0, QK_K/8);
1666. // We put the high bit of the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc.
  1667. int m = 0;
  1668. uint8_t hm = 1;
  1669. for (int j = 0; j < QK_K; ++j) {
  1670. if (L[j] > 3) {
  1671. y[i].hmask[m] |= hm;
  1672. L[j] -= 4;
  1673. }
  1674. if (++m == QK_K/8) {
  1675. m = 0; hm <<= 1;
  1676. }
  1677. }
  1678. #if QK_K == 256
  1679. for (int j = 0; j < QK_K; j += 128) {
  1680. for (int l = 0; l < 32; ++l) {
  1681. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1682. }
  1683. }
  1684. #else
  1685. for (int l = 0; l < 16; ++l) {
  1686. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1687. }
  1688. #endif
  1689. x += QK_K;
  1690. }
  1691. }
  1692. #if QK_K == 256
  1693. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
  1694. assert(k % QK_K == 0);
  1695. const int nb = k / QK_K;
  1696. const uint32_t kmask1 = 0x03030303;
  1697. const uint32_t kmask2 = 0x0f0f0f0f;
  1698. uint32_t aux[4];
  1699. const int8_t * scales = (const int8_t*)aux;
  1700. for (int i = 0; i < nb; i++) {
  1701. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1702. const uint8_t * restrict q = x[i].qs;
  1703. const uint8_t * restrict hm = x[i].hmask;
  1704. uint8_t m = 1;
  1705. memcpy(aux, x[i].scales, 12);
  1706. uint32_t tmp = aux[2];
  1707. aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  1708. aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  1709. aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  1710. aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  1711. int is = 0;
  1712. float dl;
  1713. for (int n = 0; n < QK_K; n += 128) {
  1714. int shift = 0;
  1715. for (int j = 0; j < 4; ++j) {
  1716. dl = d_all * (scales[is++] - 32);
  1717. for (int l = 0; l < 16; ++l) {
  1718. *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
  1719. }
  1720. dl = d_all * (scales[is++] - 32);
  1721. for (int l = 0; l < 16; ++l) {
  1722. *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
  1723. }
  1724. shift += 2;
  1725. m <<= 1;
  1726. }
  1727. q += 32;
  1728. }
  1729. }
  1730. }
  1731. #else
  1732. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
  1733. assert(k % QK_K == 0);
  1734. assert(QK_K == 64);
  1735. const int nb = k / QK_K;
  1736. for (int i = 0; i < nb; i++) {
  1737. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1738. const uint8_t * restrict q = x[i].qs;
  1739. const uint8_t * restrict hm = x[i].hmask;
  1740. const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
  1741. const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
  1742. const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
  1743. const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
  1744. for (int l=0; l<8; ++l) {
  1745. uint8_t h = hm[l];
  1746. y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
  1747. y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
  1748. y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
  1749. y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
  1750. y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
  1751. y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
  1752. y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
  1753. y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
  1754. }
  1755. y += QK_K;
  1756. }
  1757. }
  1758. #endif
  1759. void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
  1760. quantize_row_q3_K_reference(x, vy, k);
  1761. }
  1762. size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  1763. (void)hist; // TODO: collect histograms
  1764. for (int j = 0; j < n; j += k) {
  1765. block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
  1766. quantize_row_q3_K_reference(src + j, y, k);
  1767. }
  1768. return (n/QK_K*sizeof(block_q3_K));
  1769. }
  1770. static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int n_per_row, const float * restrict quant_weights) {
  1771. #if QK_K != 256
  1772. (void)quant_weights;
  1773. quantize_row_q3_K_reference(x, y, n_per_row);
  1774. #else
  1775. assert(n_per_row % QK_K == 0);
  1776. const int nb = n_per_row / QK_K;
  1777. int8_t L[QK_K];
  1778. float scales[QK_K / 16];
  1779. float weight[16];
  1780. float sw[QK_K / 16];
  1781. int8_t Ls[QK_K / 16];
  1782. for (int i = 0; i < nb; i++) {
  1783. float sumx2 = 0;
  1784. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1785. float sigma2 = 2*sumx2/QK_K;
  1786. for (int j = 0; j < QK_K/16; ++j) {
  1787. if (quant_weights) {
  1788. const float * qw = quant_weights ? quant_weights + QK_K * i + 16*j : NULL;
  1789. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
  1790. } else {
  1791. for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
  1792. }
  1793. float sumw = 0;
  1794. for (int l = 0; l < 16; ++l) sumw += weight[l];
  1795. sw[j] = sumw;
  1796. scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
  1797. }
  1798. memset(y[i].scales, 0, 12);
  1799. float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
  1800. for (int j = 0; j < QK_K/16; ++j) {
  1801. int l = Ls[j];
  1802. if (j < 8) {
  1803. y[i].scales[j] = l & 0xF;
  1804. } else {
  1805. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1806. }
  1807. l >>= 4;
  1808. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1809. }
  1810. y[i].d = GGML_FP32_TO_FP16(d_block);
  1811. int8_t sc;
  1812. for (int j = 0; j < QK_K/16; ++j) {
  1813. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1814. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1815. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1816. if (!d) {
  1817. continue;
  1818. }
  1819. for (int ii = 0; ii < 16; ++ii) {
  1820. int l = nearest_int(x[16*j + ii]/d);
  1821. l = MAX(-4, MIN(3, l));
  1822. L[16*j + ii] = l + 4;
  1823. }
  1824. }
  1825. memset(y[i].hmask, 0, QK_K/8);
1826. // We put the high bit of the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc.
  1827. int m = 0;
  1828. uint8_t hm = 1;
  1829. for (int j = 0; j < QK_K; ++j) {
  1830. if (L[j] > 3) {
  1831. y[i].hmask[m] |= hm;
  1832. L[j] -= 4;
  1833. }
  1834. if (++m == QK_K/8) {
  1835. m = 0; hm <<= 1;
  1836. }
  1837. }
  1838. for (int j = 0; j < QK_K; j += 128) {
  1839. for (int l = 0; l < 32; ++l) {
  1840. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1841. }
  1842. }
  1843. x += QK_K;
  1844. }
  1845. #endif
  1846. }
  1847. size_t quantize_q3_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  1848. (void)hist;
  1849. size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
  1850. if (!quant_weights) {
  1851. quantize_row_q3_K_reference(src, dst, nrow*n_per_row);
  1852. }
  1853. else {
  1854. char * qrow = (char *)dst;
  1855. for (int row = 0; row < nrow; ++row) {
  1856. quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
  1857. src += n_per_row;
  1858. qrow += row_size;
  1859. }
  1860. }
  1861. return nrow * row_size;
  1862. }
  1863. // ====================== 4-bit (de)-quantization
  1864. void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
  1865. assert(k % QK_K == 0);
  1866. const int nb = k / QK_K;
  1867. uint8_t L[QK_K];
  1868. uint8_t Laux[32];
  1869. float weights[32];
  1870. float mins[QK_K/32];
  1871. float scales[QK_K/32];
  1872. for (int i = 0; i < nb; i++) {
1873. float max_scale = 0; // as we are subtracting the min, scales are always positive
  1874. float max_min = 0;
  1875. for (int j = 0; j < QK_K/32; ++j) {
  1876. //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  1877. float sum_x2 = 0;
  1878. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  1879. float av_x = sqrtf(sum_x2/32);
  1880. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1881. scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
  1882. float scale = scales[j];
  1883. if (scale > max_scale) {
  1884. max_scale = scale;
  1885. }
  1886. float min = mins[j];
  1887. if (min > max_min) {
  1888. max_min = min;
  1889. }
  1890. }
  1891. #if QK_K == 256
  1892. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  1893. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  1894. for (int j = 0; j < QK_K/32; ++j) {
  1895. uint8_t ls = nearest_int(inv_scale*scales[j]);
  1896. uint8_t lm = nearest_int(inv_min*mins[j]);
  1897. ls = MIN(63, ls);
  1898. lm = MIN(63, lm);
  1899. if (j < 4) {
  1900. y[i].scales[j] = ls;
  1901. y[i].scales[j+4] = lm;
  1902. } else {
  1903. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1904. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1905. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1906. }
  1907. }
  1908. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  1909. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  1910. uint8_t sc, m;
  1911. for (int j = 0; j < QK_K/32; ++j) {
  1912. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1913. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1914. if (!d) continue;
  1915. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1916. for (int ii = 0; ii < 32; ++ii) {
  1917. int l = nearest_int((x[32*j + ii] + dm)/d);
  1918. l = MAX(0, MIN(15, l));
  1919. L[32*j + ii] = l;
  1920. }
  1921. }
  1922. #else
  1923. const float s_factor = 15.f;
  1924. float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
  1925. float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
  1926. int d1 = nearest_int(inv_scale*scales[0]);
  1927. int m1 = nearest_int(inv_min*mins[0]);
  1928. int d2 = nearest_int(inv_scale*scales[1]);
  1929. int m2 = nearest_int(inv_min*mins[1]);
  1930. y[i].scales[0] = d1 | (m1 << 4);
  1931. y[i].scales[1] = d2 | (m2 << 4);
  1932. y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
  1933. y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
  1934. float sumlx = 0;
  1935. int suml2 = 0;
  1936. for (int j = 0; j < QK_K/32; ++j) {
  1937. const uint8_t sd = y[i].scales[j] & 0xF;
  1938. const uint8_t sm = y[i].scales[j] >> 4;
  1939. const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
  1940. if (!d) continue;
  1941. const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
  1942. for (int ii = 0; ii < 32; ++ii) {
  1943. int l = nearest_int((x[32*j + ii] + m)/d);
  1944. l = MAX(0, MIN(15, l));
  1945. L[32*j + ii] = l;
  1946. sumlx += (x[32*j + ii] + m)*l*sd;
  1947. suml2 += l*l*sd*sd;
  1948. }
  1949. }
  1950. if (suml2) {
  1951. y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
  1952. }
  1953. #endif
  1954. uint8_t * q = y[i].qs;
  1955. for (int j = 0; j < QK_K; j += 64) {
  1956. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  1957. q += 32;
  1958. }
  1959. x += QK_K;
  1960. }
  1961. }
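// Illustration only: the 12-byte scales field written above packs eight 6-bit scales and eight
// 6-bit mins. Bytes 0-3 carry scales 0-3 (low 6 bits) plus the high 2 bits of scales 4-7 (top
// 2 bits); bytes 4-7 do the same for the mins; bytes 8-11 carry the low nibbles of scales 4-7
// (low nibble) and mins 4-7 (high nibble). get_scale_min_k4(), used above, presumably performs
// this unpacking; the hypothetical helper below sketches the inverse of the packing for reference.
static void example_unpack_scale_min_k4(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
    if (j < 4) {
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    } else {
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j    ] >> 6) << 4);
    }
}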
  1962. void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
  1963. assert(k % QK_K == 0);
  1964. const int nb = k / QK_K;
  1965. for (int i = 0; i < nb; i++) {
  1966. const uint8_t * q = x[i].qs;
  1967. #if QK_K == 256
  1968. const float d = GGML_FP16_TO_FP32(x[i].d);
  1969. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1970. int is = 0;
  1971. uint8_t sc, m;
  1972. for (int j = 0; j < QK_K; j += 64) {
  1973. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  1974. const float d1 = d * sc; const float m1 = min * m;
  1975. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  1976. const float d2 = d * sc; const float m2 = min * m;
  1977. for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
  1978. for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
  1979. q += 32; is += 2;
  1980. }
  1981. #else
  1982. const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
  1983. const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
  1984. const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
  1985. const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
  1986. for (int l = 0; l < 32; ++l) {
  1987. y[l+ 0] = d1 * (q[l] & 0xF) - m1;
  1988. y[l+32] = d2 * (q[l] >> 4) - m2;
  1989. }
  1990. y += QK_K;
  1991. #endif
  1992. }
  1993. }
  1994. void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
  1995. assert(k % QK_K == 0);
  1996. block_q4_K * restrict y = vy;
  1997. quantize_row_q4_K_reference(x, y, k);
  1998. }
  1999. size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  2000. assert(k % QK_K == 0);
  2001. (void)hist; // TODO: collect histograms
  2002. for (int j = 0; j < n; j += k) {
  2003. block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
  2004. quantize_row_q4_K_reference(src + j, y, k);
  2005. }
  2006. return (n/QK_K*sizeof(block_q4_K));
  2007. }
  2008. static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int n_per_row, const float * quant_weights) {
  2009. #if QK_K != 256
  2010. (void)quant_weights;
  2011. quantize_row_q4_K_reference(x, y, n_per_row);
  2012. #else
  2013. assert(n_per_row % QK_K == 0);
  2014. const int nb = n_per_row / QK_K;
  2015. uint8_t L[QK_K];
  2016. uint8_t Laux[32];
  2017. float weights[32];
  2018. float mins[QK_K/32];
  2019. float scales[QK_K/32];
  2020. for (int i = 0; i < nb; i++) {
  2021. float sum_x2 = 0;
  2022. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2023. float sigma2 = sum_x2/QK_K;
  2024. float av_x = sqrtf(sigma2);
  2025. float max_scale = 0; // as we are deducting the min, scales are always positive
  2026. float max_min = 0;
  2027. for (int j = 0; j < QK_K/32; ++j) {
  2028. if (quant_weights) {
  2029. const float * qw = quant_weights + QK_K*i + 32*j;
  2030. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2031. } else {
  2032. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2033. }
  2034. scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2035. //scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
  2036. float scale = scales[j];
  2037. if (scale > max_scale) {
  2038. max_scale = scale;
  2039. }
  2040. float min = mins[j];
  2041. if (min > max_min) {
  2042. max_min = min;
  2043. }
  2044. }
  2045. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  2046. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  2047. for (int j = 0; j < QK_K/32; ++j) {
  2048. uint8_t ls = nearest_int(inv_scale*scales[j]);
  2049. uint8_t lm = nearest_int(inv_min*mins[j]);
  2050. ls = MIN(63, ls);
  2051. lm = MIN(63, lm);
  2052. if (j < 4) {
  2053. y[i].scales[j] = ls;
  2054. y[i].scales[j+4] = lm;
  2055. } else {
  2056. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2057. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2058. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2059. }
  2060. }
  2061. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  2062. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  2063. uint8_t sc, m;
  2064. for (int j = 0; j < QK_K/32; ++j) {
  2065. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2066. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2067. if (!d) continue;
  2068. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2069. for (int ii = 0; ii < 32; ++ii) {
  2070. int l = nearest_int((x[32*j + ii] + dm)/d);
  2071. l = MAX(0, MIN(15, l));
  2072. L[32*j + ii] = l;
  2073. }
  2074. }
  2075. uint8_t * q = y[i].qs;
  2076. for (int j = 0; j < QK_K; j += 64) {
  2077. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  2078. q += 32;
  2079. }
  2080. x += QK_K;
  2081. }
  2082. #endif
  2083. }
  2084. size_t quantize_q4_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2085. (void)hist;
  2086. size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
  2087. if (!quant_weights) {
  2088. quantize_row_q4_K_reference(src, dst, nrow*n_per_row);
  2089. }
  2090. else {
  2091. char * qrow = (char *)dst;
  2092. for (int row = 0; row < nrow; ++row) {
  2093. quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
  2094. src += n_per_row;
  2095. qrow += row_size;
  2096. }
  2097. }
  2098. return nrow * row_size;
  2099. }
  2100. // ====================== 5-bit (de)-quantization
  2101. void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
  2102. assert(k % QK_K == 0);
  2103. const int nb = k / QK_K;
  2104. #if QK_K == 256
  2105. uint8_t L[QK_K];
  2106. float mins[QK_K/32];
  2107. float scales[QK_K/32];
  2108. float weights[32];
  2109. uint8_t Laux[32];
  2110. #else
  2111. int8_t L[QK_K];
  2112. float scales[QK_K/16];
  2113. #endif
  2114. for (int i = 0; i < nb; i++) {
  2115. #if QK_K == 256
  2116. float max_scale = 0; // as we are deducting the min, scales are always positive
  2117. float max_min = 0;
  2118. for (int j = 0; j < QK_K/32; ++j) {
  2119. //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  2120. float sum_x2 = 0;
  2121. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  2122. float av_x = sqrtf(sum_x2/32);
  2123. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2124. scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
  2125. float scale = scales[j];
  2126. if (scale > max_scale) {
  2127. max_scale = scale;
  2128. }
  2129. float min = mins[j];
  2130. if (min > max_min) {
  2131. max_min = min;
  2132. }
  2133. }
  2134. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  2135. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  2136. for (int j = 0; j < QK_K/32; ++j) {
  2137. uint8_t ls = nearest_int(inv_scale*scales[j]);
  2138. uint8_t lm = nearest_int(inv_min*mins[j]);
  2139. ls = MIN(63, ls);
  2140. lm = MIN(63, lm);
  2141. if (j < 4) {
  2142. y[i].scales[j] = ls;
  2143. y[i].scales[j+4] = lm;
  2144. } else {
  2145. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2146. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2147. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2148. }
  2149. }
  2150. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  2151. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  2152. uint8_t sc, m;
  2153. for (int j = 0; j < QK_K/32; ++j) {
  2154. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2155. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2156. if (!d) continue;
  2157. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2158. for (int ii = 0; ii < 32; ++ii) {
  2159. int l = nearest_int((x[32*j + ii] + dm)/d);
  2160. l = MAX(0, MIN(31, l));
  2161. L[32*j + ii] = l;
  2162. }
  2163. }
  2164. uint8_t * restrict qh = y[i].qh;
  2165. uint8_t * restrict ql = y[i].qs;
  2166. memset(qh, 0, QK_K/8);
  2167. uint8_t m1 = 1, m2 = 2;
  2168. for (int n = 0; n < QK_K; n += 64) {
  2169. for (int j = 0; j < 32; ++j) {
  2170. int l1 = L[n + j];
  2171. if (l1 > 15) {
  2172. l1 -= 16; qh[j] |= m1;
  2173. }
  2174. int l2 = L[n + j + 32];
  2175. if (l2 > 15) {
  2176. l2 -= 16; qh[j] |= m2;
  2177. }
  2178. ql[j] = l1 | (l2 << 4);
  2179. }
  2180. m1 <<= 2; m2 <<= 2;
  2181. ql += 32;
  2182. }
  2183. #else
  2184. float max_scale = 0, amax = 0;
  2185. for (int j = 0; j < QK_K/16; ++j) {
  2186. scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL);
  2187. float abs_scale = fabsf(scales[j]);
  2188. if (abs_scale > amax) {
  2189. amax = abs_scale;
  2190. max_scale = scales[j];
  2191. }
  2192. }
  2193. float iscale = -128.f/max_scale;
  2194. for (int j = 0; j < QK_K/16; ++j) {
  2195. int l = nearest_int(iscale*scales[j]);
  2196. y[i].scales[j] = MAX(-128, MIN(127, l));
  2197. }
  2198. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2199. for (int j = 0; j < QK_K/16; ++j) {
  2200. const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2201. if (!d) continue;
  2202. for (int ii = 0; ii < 16; ++ii) {
  2203. int l = nearest_int(x[16*j + ii]/d);
  2204. l = MAX(-16, MIN(15, l));
  2205. L[16*j + ii] = l + 16;
  2206. }
  2207. }
  2208. uint8_t * restrict qh = y[i].qh;
  2209. uint8_t * restrict ql = y[i].qs;
  2210. memset(qh, 0, QK_K/8);
  2211. for (int j = 0; j < 32; ++j) {
  2212. int jm = j%8;
  2213. int is = j/8;
  2214. int l1 = L[j];
  2215. if (l1 > 15) {
  2216. l1 -= 16; qh[jm] |= (1 << is);
  2217. }
  2218. int l2 = L[j + 32];
  2219. if (l2 > 15) {
  2220. l2 -= 16; qh[jm] |= (1 << (4 + is));
  2221. }
  2222. ql[j] = l1 | (l2 << 4);
  2223. }
  2224. #endif
  2225. x += QK_K;
  2226. }
  2227. }
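// Layout note for the Q5_K packing above (QK_K == 256 path): within each 64-value chunk starting
// at n, ql[j] holds the low 4 bits of values n+j and n+j+32, and their 5th bits land in qh[j] at
// bit positions 2*(n/64) and 2*(n/64)+1 (the m1/m2 masks shift up by two positions per chunk),
// so each qh byte collects one high bit from each of the eight 32-value sub-blocks.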
  2228. void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
  2229. assert(k % QK_K == 0);
  2230. const int nb = k / QK_K;
  2231. for (int i = 0; i < nb; i++) {
  2232. const uint8_t * ql = x[i].qs;
  2233. const uint8_t * qh = x[i].qh;
  2234. #if QK_K == 256
  2235. const float d = GGML_FP16_TO_FP32(x[i].d);
  2236. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  2237. int is = 0;
  2238. uint8_t sc, m;
  2239. uint8_t u1 = 1, u2 = 2;
  2240. for (int j = 0; j < QK_K; j += 64) {
  2241. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  2242. const float d1 = d * sc; const float m1 = min * m;
  2243. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  2244. const float d2 = d * sc; const float m2 = min * m;
  2245. for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
  2246. for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
  2247. ql += 32; is += 2;
  2248. u1 <<= 2; u2 <<= 2;
  2249. }
  2250. #else
  2251. float d = GGML_FP16_TO_FP32(x[i].d);
  2252. const int8_t * restrict s = x[i].scales;
  2253. for (int l = 0; l < 8; ++l) {
  2254. y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
  2255. y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
  2256. y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
  2257. y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
  2258. y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
  2259. y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
  2260. y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
  2261. y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
  2262. }
  2263. y += QK_K;
  2264. #endif
  2265. }
  2266. }
  2267. void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
  2268. assert(k % QK_K == 0);
  2269. block_q5_K * restrict y = vy;
  2270. quantize_row_q5_K_reference(x, y, k);
  2271. }
  2272. size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  2273. assert(k % QK_K == 0);
  2274. (void)hist; // TODO: collect histograms
  2275. for (int j = 0; j < n; j += k) {
  2276. block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
  2277. quantize_row_q5_K_reference(src + j, y, k);
  2278. }
  2279. return (n/QK_K*sizeof(block_q5_K));
  2280. }
  2281. static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int n_per_row, const float * quant_weights) {
  2282. #if QK_K != 256
  2283. (void)quant_weights;
  2284. quantize_row_q5_K_reference(x, y, n_per_row);
  2285. #else
  2286. assert(n_per_row % QK_K == 0);
  2287. const int nb = n_per_row / QK_K;
  2288. uint8_t L[QK_K];
  2289. float mins[QK_K/32];
  2290. float scales[QK_K/32];
  2291. float weights[32];
  2292. uint8_t Laux[32];
  2293. for (int i = 0; i < nb; i++) {
  2294. float sum_x2 = 0;
  2295. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2296. float sigma2 = sum_x2/QK_K;
  2297. float av_x = sqrtf(sigma2);
  2298. float max_scale = 0; // as we are deducting the min, scales are always positive
  2299. float max_min = 0;
  2300. for (int j = 0; j < QK_K/32; ++j) {
  2301. if (quant_weights) {
  2302. const float * qw = quant_weights + QK_K*i + 32*j;
  2303. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2304. } else {
  2305. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2306. }
  2307. scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2308. float scale = scales[j];
  2309. if (scale > max_scale) {
  2310. max_scale = scale;
  2311. }
  2312. float min = mins[j];
  2313. if (min > max_min) {
  2314. max_min = min;
  2315. }
  2316. }
  2317. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  2318. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  2319. for (int j = 0; j < QK_K/32; ++j) {
  2320. uint8_t ls = nearest_int(inv_scale*scales[j]);
  2321. uint8_t lm = nearest_int(inv_min*mins[j]);
  2322. ls = MIN(63, ls);
  2323. lm = MIN(63, lm);
  2324. if (j < 4) {
  2325. y[i].scales[j] = ls;
  2326. y[i].scales[j+4] = lm;
  2327. } else {
  2328. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2329. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2330. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2331. }
  2332. }
  2333. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  2334. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  2335. uint8_t sc, m;
  2336. for (int j = 0; j < QK_K/32; ++j) {
  2337. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2338. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2339. if (!d) continue;
  2340. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2341. for (int ii = 0; ii < 32; ++ii) {
  2342. int l = nearest_int((x[32*j + ii] + dm)/d);
  2343. l = MAX(0, MIN(31, l));
  2344. L[32*j + ii] = l;
  2345. }
  2346. }
  2347. uint8_t * restrict qh = y[i].qh;
  2348. uint8_t * restrict ql = y[i].qs;
  2349. memset(qh, 0, QK_K/8);
  2350. uint8_t m1 = 1, m2 = 2;
  2351. for (int n = 0; n < QK_K; n += 64) {
  2352. for (int j = 0; j < 32; ++j) {
  2353. int l1 = L[n + j];
  2354. if (l1 > 15) {
  2355. l1 -= 16; qh[j] |= m1;
  2356. }
  2357. int l2 = L[n + j + 32];
  2358. if (l2 > 15) {
  2359. l2 -= 16; qh[j] |= m2;
  2360. }
  2361. ql[j] = l1 | (l2 << 4);
  2362. }
  2363. m1 <<= 2; m2 <<= 2;
  2364. ql += 32;
  2365. }
  2366. x += QK_K;
  2367. }
  2368. #endif
  2369. }
  2370. size_t quantize_q5_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2371. (void)hist;
  2372. size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
  2373. if (!quant_weights) {
  2374. quantize_row_q5_K_reference(src, dst, nrow*n_per_row);
  2375. }
  2376. else {
  2377. char * qrow = (char *)dst;
  2378. for (int row = 0; row < nrow; ++row) {
  2379. quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
  2380. src += n_per_row;
  2381. qrow += row_size;
  2382. }
  2383. }
  2384. return nrow * row_size;
  2385. }
  2386. // ====================== 6-bit (de)-quantization
  2387. void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
  2388. assert(k % QK_K == 0);
  2389. const int nb = k / QK_K;
  2390. int8_t L[QK_K];
  2391. float scales[QK_K/16];
  2392. for (int i = 0; i < nb; i++) {
  2393. float max_scale = 0;
  2394. float max_abs_scale = 0;
  2395. for (int ib = 0; ib < QK_K/16; ++ib) {
  2396. const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2397. scales[ib] = scale;
  2398. const float abs_scale = fabsf(scale);
  2399. if (abs_scale > max_abs_scale) {
  2400. max_abs_scale = abs_scale;
  2401. max_scale = scale;
  2402. }
  2403. }
  2404. if (!max_abs_scale) {
  2405. memset(&y[i], 0, sizeof(block_q6_K));
  2406. y[i].d = GGML_FP32_TO_FP16(0.f);
  2407. x += QK_K;
  2408. continue;
  2409. }
  2410. float iscale = -128.f/max_scale;
  2411. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2412. for (int ib = 0; ib < QK_K/16; ++ib) {
  2413. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2414. }
  2415. for (int j = 0; j < QK_K/16; ++j) {
  2416. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2417. if (!d) {
  2418. continue;
  2419. }
  2420. for (int ii = 0; ii < 16; ++ii) {
  2421. int l = nearest_int(x[16*j + ii]/d);
  2422. l = MAX(-32, MIN(31, l));
  2423. L[16*j + ii] = l + 32;
  2424. }
  2425. }
  2426. uint8_t * restrict ql = y[i].ql;
  2427. uint8_t * restrict qh = y[i].qh;
  2428. #if QK_K == 256
  2429. for (int j = 0; j < QK_K; j += 128) {
  2430. for (int l = 0; l < 32; ++l) {
  2431. const uint8_t q1 = L[j + l + 0] & 0xF;
  2432. const uint8_t q2 = L[j + l + 32] & 0xF;
  2433. const uint8_t q3 = L[j + l + 64] & 0xF;
  2434. const uint8_t q4 = L[j + l + 96] & 0xF;
  2435. ql[l+ 0] = q1 | (q3 << 4);
  2436. ql[l+32] = q2 | (q4 << 4);
  2437. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2438. }
  2439. ql += 64;
  2440. qh += 32;
  2441. }
  2442. #else
  2443. for (int l = 0; l < 32; ++l) {
  2444. const uint8_t q1 = L[l + 0] & 0xF;
  2445. const uint8_t q2 = L[l + 32] & 0xF;
  2446. ql[l] = q1 | (q2 << 4);
  2447. }
  2448. for (int l = 0; l < 16; ++l) {
  2449. qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
  2450. }
  2451. #endif
  2452. x += QK_K;
  2453. }
  2454. }
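// Layout note for the Q6_K packing above (QK_K == 256 path): quants are stored with a +32 offset
// (range 0..63). Within each 128-value chunk, ql[l] holds the low nibbles of values l and l+64,
// ql[l+32] holds the low nibbles of values l+32 and l+96, and qh[l] packs the top 2 bits of
// values l, l+32, l+64 and l+96 in bit pairs 0-1, 2-3, 4-5 and 6-7.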
  2455. void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
  2456. assert(k % QK_K == 0);
  2457. const int nb = k / QK_K;
  2458. for (int i = 0; i < nb; i++) {
  2459. const float d = GGML_FP16_TO_FP32(x[i].d);
  2460. const uint8_t * restrict ql = x[i].ql;
  2461. const uint8_t * restrict qh = x[i].qh;
  2462. const int8_t * restrict sc = x[i].scales;
  2463. #if QK_K == 256
  2464. for (int n = 0; n < QK_K; n += 128) {
  2465. for (int l = 0; l < 32; ++l) {
  2466. int is = l/16;
  2467. const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2468. const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2469. const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2470. const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2471. y[l + 0] = d * sc[is + 0] * q1;
  2472. y[l + 32] = d * sc[is + 2] * q2;
  2473. y[l + 64] = d * sc[is + 4] * q3;
  2474. y[l + 96] = d * sc[is + 6] * q4;
  2475. }
  2476. y += 128;
  2477. ql += 64;
  2478. qh += 32;
  2479. sc += 8;
  2480. }
  2481. #else
  2482. for (int l = 0; l < 16; ++l) {
  2483. const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2484. const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2485. const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2486. const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2487. y[l+ 0] = d * sc[0] * q1;
  2488. y[l+16] = d * sc[1] * q2;
  2489. y[l+32] = d * sc[2] * q3;
  2490. y[l+48] = d * sc[3] * q4;
  2491. }
  2492. y += 64;
  2493. #endif
  2494. }
  2495. }
  2496. void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
  2497. assert(k % QK_K == 0);
  2498. block_q6_K * restrict y = vy;
  2499. quantize_row_q6_K_reference(x, y, k);
  2500. }
  2501. size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
  2502. assert(k % QK_K == 0);
  2503. (void)hist; // TODO: collect histograms
  2504. for (int j = 0; j < n; j += k) {
  2505. block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
  2506. quantize_row_q6_K_reference(src + j, y, k);
  2507. }
  2508. return (n/QK_K*sizeof(block_q6_K));
  2509. }
  2510. static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int n_per_row, const float * quant_weights) {
  2511. #if QK_K != 256
  2512. (void)quant_weights;
  2513. quantize_row_q6_K_reference(x, y, n_per_row);
  2514. #else
  2515. assert(n_per_row % QK_K == 0);
  2516. const int nb = n_per_row / QK_K;
  2517. int8_t L[QK_K];
  2518. float scales[QK_K/16];
  2519. //float weights[16];
  2520. for (int i = 0; i < nb; i++) {
  2521. //float sum_x2 = 0;
  2522. //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
  2523. //float sigma2 = sum_x2/QK_K;
  2524. float max_scale = 0;
  2525. float max_abs_scale = 0;
  2526. for (int ib = 0; ib < QK_K/16; ++ib) {
  2527. float scale;
  2528. if (quant_weights) {
  2529. const float * qw = quant_weights + QK_K*i + 16*ib;
  2530. //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
  2531. //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
  2532. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
  2533. } else {
  2534. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2535. }
  2536. scales[ib] = scale;
  2537. const float abs_scale = fabsf(scale);
  2538. if (abs_scale > max_abs_scale) {
  2539. max_abs_scale = abs_scale;
  2540. max_scale = scale;
  2541. }
  2542. }
  2543. if (!max_abs_scale) {
  2544. memset(&y[i], 0, sizeof(block_q6_K));
  2545. y[i].d = GGML_FP32_TO_FP16(0.f);
  2546. x += QK_K;
  2547. continue;
  2548. }
  2549. float iscale = -128.f/max_scale;
  2550. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2551. for (int ib = 0; ib < QK_K/16; ++ib) {
  2552. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2553. }
  2554. for (int j = 0; j < QK_K/16; ++j) {
  2555. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2556. if (!d) {
  2557. continue;
  2558. }
  2559. for (int ii = 0; ii < 16; ++ii) {
  2560. int l = nearest_int(x[16*j + ii]/d);
  2561. l = MAX(-32, MIN(31, l));
  2562. L[16*j + ii] = l + 32;
  2563. }
  2564. }
  2565. uint8_t * restrict ql = y[i].ql;
  2566. uint8_t * restrict qh = y[i].qh;
  2567. for (int j = 0; j < QK_K; j += 128) {
  2568. for (int l = 0; l < 32; ++l) {
  2569. const uint8_t q1 = L[j + l + 0] & 0xF;
  2570. const uint8_t q2 = L[j + l + 32] & 0xF;
  2571. const uint8_t q3 = L[j + l + 64] & 0xF;
  2572. const uint8_t q4 = L[j + l + 96] & 0xF;
  2573. ql[l+ 0] = q1 | (q3 << 4);
  2574. ql[l+32] = q2 | (q4 << 4);
  2575. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2576. }
  2577. ql += 64;
  2578. qh += 32;
  2579. }
  2580. x += QK_K;
  2581. }
  2582. #endif
  2583. }
  2584. size_t quantize_q6_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2585. (void)hist;
  2586. size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
  2587. if (!quant_weights) {
  2588. quantize_row_q6_K_reference(src, dst, nrow*n_per_row);
  2589. }
  2590. else {
  2591. char * qrow = (char *)dst;
  2592. for (int row = 0; row < nrow; ++row) {
  2593. quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
  2594. src += n_per_row;
  2595. qrow += row_size;
  2596. }
  2597. }
  2598. return nrow * row_size;
  2599. }
  2600. static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int n_per_row, const float * quant_weights) {
  2601. static_assert(QK4_0 == 32, "QK4_0 must be 32");
  2602. if (!quant_weights) {
  2603. quantize_row_q4_0_reference(x, y, n_per_row);
  2604. return;
  2605. }
  2606. float weight[QK4_0];
  2607. int8_t L[QK4_0];
  2608. float sum_x2 = 0;
  2609. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2610. float sigma2 = sum_x2/n_per_row;
  2611. const int nb = n_per_row/QK4_0;
  2612. for (int ib = 0; ib < nb; ++ib) {
  2613. const float * xb = x + QK4_0 * ib;
  2614. const float * qw = quant_weights + QK4_0 * ib;
  2615. for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2616. float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
  2617. y[ib].d = GGML_FP32_TO_FP16(d);
  2618. for (int j = 0; j < 16; ++j) {
  2619. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2620. }
  2621. }
  2622. }
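// Note on the weighting above: with an importance matrix present, each value's weight is
// qw[j] * sqrt(sigma2 + x[j]*x[j]), i.e. the caller-supplied importance scaled by the value's
// magnitude, with the row's mean square sigma2 keeping near-zero values from being ignored
// entirely. The other *_impl quantizers in this file use the same construction.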
  2623. size_t quantize_q4_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2624. if (!quant_weights) {
  2625. return ggml_quantize_q4_0(src, dst, nrow*n_per_row, n_per_row, hist);
  2626. }
  2627. size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  2628. char * qrow = (char *)dst;
  2629. for (int row = 0; row < nrow; ++row) {
  2630. quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
  2631. src += n_per_row;
  2632. qrow += row_size;
  2633. }
  2634. return nrow * row_size;
  2635. }
  2636. static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int n_per_row, const float * quant_weights) {
  2637. static_assert(QK4_1 == 32, "QK4_1 must be 32");
  2638. if (!quant_weights) {
  2639. quantize_row_q4_1_reference(x, y, n_per_row);
  2640. return;
  2641. }
  2642. float weight[QK4_1];
  2643. uint8_t L[QK4_1], Laux[QK4_1];
  2644. float sum_x2 = 0;
  2645. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2646. float sigma2 = sum_x2/n_per_row;
  2647. const int nb = n_per_row/QK4_1;
  2648. for (int ib = 0; ib < nb; ++ib) {
  2649. const float * xb = x + QK4_1 * ib;
  2650. const float * qw = quant_weights + QK4_1 * ib;
  2651. for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2652. float min;
  2653. float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2654. y[ib].d = GGML_FP32_TO_FP16(d);
  2655. y[ib].m = GGML_FP32_TO_FP16(-min);
  2656. for (int j = 0; j < 16; ++j) {
  2657. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2658. }
  2659. }
  2660. }
  2661. size_t quantize_q4_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2662. if (!quant_weights) {
  2663. return ggml_quantize_q4_1(src, dst, nrow*n_per_row, n_per_row, hist);
  2664. }
  2665. size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  2666. char * qrow = (char *)dst;
  2667. for (int row = 0; row < nrow; ++row) {
  2668. quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
  2669. src += n_per_row;
  2670. qrow += row_size;
  2671. }
  2672. return nrow * row_size;
  2673. }
  2674. static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int n_per_row, const float * quant_weights) {
  2675. static_assert(QK5_0 == 32, "QK5_0 must be 32");
  2676. if (!quant_weights) {
  2677. quantize_row_q5_0_reference(x, y, n_per_row);
  2678. return;
  2679. }
  2680. float weight[QK5_0];
  2681. int8_t L[QK5_0];
  2682. float sum_x2 = 0;
  2683. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2684. float sigma2 = sum_x2/n_per_row;
  2685. const int nb = n_per_row/QK5_0;
  2686. for (int ib = 0; ib < nb; ++ib) {
  2687. const float * xb = x + QK5_0 * ib;
  2688. const float * qw = quant_weights + QK5_0 * ib;
  2689. for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2690. float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
  2691. y[ib].d = GGML_FP32_TO_FP16(d);
  2692. uint32_t qh = 0;
  2693. for (int j = 0; j < 16; ++j) {
  2694. const uint8_t xi0 = L[j];
  2695. const uint8_t xi1 = L[j+16];
  2696. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  2697. // get the 5-th bit and store it in qh at the right position
  2698. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  2699. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
  2700. }
  2701. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2702. }
  2703. }
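// Illustration only: recovering the 5-bit quant packed above. The first 16 values of a block sit
// in the low nibbles of qs, the next 16 in the high nibbles, and bit j of qh is the 5th bit of
// value j; the reference dequantizer then subtracts 16 and multiplies by d. The helper name below
// is hypothetical.
static int example_q5_0_quant(const uint8_t * qs, uint32_t qh, int j) { // j in [0, QK5_0)
    const int nib = j < QK5_0/2 ? (qs[j] & 0x0F) : (qs[j - QK5_0/2] >> 4);
    return nib | (((qh >> j) & 1) << 4); // 5-bit value in [0, 31]
}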
  2704. size_t quantize_q5_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2705. if (!quant_weights) {
  2706. return ggml_quantize_q5_0(src, dst, nrow*n_per_row, n_per_row, hist);
  2707. }
  2708. size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  2709. char * qrow = (char *)dst;
  2710. for (int row = 0; row < nrow; ++row) {
  2711. quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
  2712. src += n_per_row;
  2713. qrow += row_size;
  2714. }
  2715. return nrow * row_size;
  2716. }
  2717. static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int n_per_row, const float * quant_weights) {
  2718. static_assert(QK5_1 == 32, "QK5_1 must be 32");
  2719. if (!quant_weights) {
  2720. quantize_row_q5_1_reference(x, y, n_per_row);
  2721. return;
  2722. }
  2723. float weight[QK5_1];
  2724. uint8_t L[QK5_1], Laux[QK5_1];
  2725. float sum_x2 = 0;
  2726. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2727. float sigma2 = sum_x2/n_per_row;
  2728. const int nb = n_per_row/QK5_1;
  2729. for (int ib = 0; ib < nb; ++ib) {
  2730. const float * xb = x + QK5_1 * ib;
  2731. const float * qw = quant_weights + QK5_1 * ib;
  2732. for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2733. float min;
  2734. float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2735. y[ib].d = GGML_FP32_TO_FP16(d);
  2736. y[ib].m = GGML_FP32_TO_FP16(-min);
  2737. uint32_t qh = 0;
  2738. for (int j = 0; j < 16; ++j) {
  2739. const uint8_t xi0 = L[j];
  2740. const uint8_t xi1 = L[j+16];
  2741. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  2742. // get the 5-th bit and store it in qh at the right position
  2743. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
2744. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
  2745. }
  2746. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2747. }
  2748. }
  2749. size_t quantize_q5_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2750. if (!quant_weights) {
  2751. return ggml_quantize_q5_1(src, dst, nrow*n_per_row, n_per_row, hist);
  2752. }
  2753. size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  2754. char * qrow = (char *)dst;
  2755. for (int row = 0; row < nrow; ++row) {
  2756. quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
  2757. src += n_per_row;
  2758. qrow += row_size;
  2759. }
  2760. return nrow * row_size;
  2761. }
  2762. // ====================== "True" 2-bit (de)-quantization
  2763. static const uint64_t iq2xxs_grid[256] = {
  2764. 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
  2765. 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808,
  2766. 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819,
  2767. 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819,
  2768. 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b,
  2769. 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808,
  2770. 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08,
  2771. 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b,
  2772. 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819,
  2773. 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08,
  2774. 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808,
  2775. 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08,
  2776. 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808,
  2777. 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808,
  2778. 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919,
  2779. 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819,
  2780. 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08,
  2781. 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908,
  2782. 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819,
  2783. 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808,
  2784. 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808,
  2785. 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908,
  2786. 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808,
  2787. 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08,
  2788. 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819,
  2789. 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819,
  2790. 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819,
  2791. 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908,
  2792. 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19,
  2793. 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819,
  2794. 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b,
  2795. 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808,
  2796. 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908,
  2797. 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08,
  2798. 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08,
  2799. 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908,
  2800. 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819,
  2801. 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808,
  2802. 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808,
  2803. 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19,
  2804. 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819,
  2805. 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919,
  2806. 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b,
  2807. 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08,
  2808. 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808,
  2809. 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908,
  2810. 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b,
  2811. 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819,
  2812. 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08,
  2813. 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08,
  2814. 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808,
  2815. 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b,
  2816. 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b,
  2817. 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908,
  2818. 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819,
  2819. 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808,
  2820. 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908,
  2821. 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b,
  2822. 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808,
  2823. 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b,
  2824. 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b,
  2825. 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808,
  2826. 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19,
  2827. 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908,
  2828. };
  2829. static const uint64_t iq2xs_grid[512] = {
  2830. 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
  2831. 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
  2832. 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
  2833. 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
  2834. 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
  2835. 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808,
  2836. 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819,
  2837. 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819,
  2838. 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808,
  2839. 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b,
  2840. 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b,
  2841. 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908,
  2842. 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908,
  2843. 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919,
  2844. 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808,
  2845. 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919,
  2846. 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908,
  2847. 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b,
  2848. 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908,
  2849. 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08,
  2850. 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808,
  2851. 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808,
  2852. 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819,
  2853. 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908,
  2854. 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819,
  2855. 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808,
  2856. 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b,
  2857. 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819,
  2858. 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819,
  2859. 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808,
  2860. 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908,
  2861. 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19,
  2862. 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b,
  2863. 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b,
  2864. 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919,
  2865. 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808,
  2866. 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819,
  2867. 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819,
  2868. 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b,
  2869. 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908,
  2870. 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808,
  2871. 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819,
  2872. 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808,
  2873. 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919,
  2874. 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808,
  2875. 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808,
  2876. 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908,
  2877. 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908,
  2878. 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808,
  2879. 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b,
  2880. 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819,
  2881. 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919,
  2882. 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908,
  2883. 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808,
  2884. 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908,
  2885. 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919,
  2886. 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08,
  2887. 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19,
  2888. 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b,
  2889. 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b,
  2890. 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808,
  2891. 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08,
  2892. 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b,
  2893. 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908,
  2894. 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b,
  2895. 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908,
  2896. 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08,
  2897. 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808,
  2898. 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808,
  2899. 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08,
  2900. 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819,
  2901. 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919,
  2902. 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808,
  2903. 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808,
  2904. 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819,
  2905. 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819,
  2906. 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908,
  2907. 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908,
  2908. 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b,
  2909. 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908,
  2910. 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908,
  2911. 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908,
  2912. 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808,
  2913. 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819,
  2914. 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819,
  2915. 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819,
  2916. 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808,
  2917. 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b,
  2918. 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819,
  2919. 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819,
  2920. 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08,
  2921. 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808,
  2922. 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19,
  2923. 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919,
  2924. 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808,
  2925. 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19,
  2926. 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b,
  2927. 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808,
  2928. 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b,
  2929. 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b,
  2930. 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08,
  2931. 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b,
  2932. 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808,
  2933. 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819,
  2934. 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808,
  2935. 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808,
  2936. 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08,
  2937. 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b,
  2938. 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19,
  2939. 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08,
  2940. 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919,
  2941. 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08,
  2942. 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08,
  2943. 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908,
  2944. 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908,
  2945. 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b,
  2946. 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908,
  2947. 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808,
  2948. 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b,
  2949. 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808,
  2950. 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808,
  2951. 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19,
  2952. 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08,
  2953. 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808,
  2954. 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b,
  2955. 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808,
  2956. 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b,
  2957. 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b,
  2958. };
  2959. static const uint32_t iq3xxs_grid[256] = {
  2960. 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
  2961. 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
  2962. 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
  2963. 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
  2964. 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
  2965. 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
  2966. 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
  2967. 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
  2968. 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
  2969. 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
  2970. 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
  2971. 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
  2972. 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
  2973. 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
  2974. 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
  2975. 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
  2976. 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
  2977. 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
  2978. 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
  2979. 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
  2980. 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
  2981. 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
  2982. 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
  2983. 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
  2984. 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
  2985. 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
  2986. 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
  2987. 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
  2988. 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
  2989. 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
  2990. 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
  2991. 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
  2992. };
  2993. static const uint8_t ksigns_iq2xs[128] = {
  2994. 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15,
  2995. 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159,
  2996. 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175,
  2997. 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63,
  2998. 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207,
  2999. 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95,
  3000. 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111,
  3001. 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
  3002. };
  3003. static const uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128};
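// Illustration only: ksigns_iq2xs[k] appears to be k with bit 7 set to the odd parity of the low
// 7 bits, so every 8-bit sign pattern has an even number of set bits (the 8th sign is implied by
// the other seven). A hypothetical generator reproducing the table under that assumption:
static void example_build_ksigns_iq2xs(uint8_t table[128]) {
    for (int k = 0; k < 128; ++k) {
        int parity = 0;
        for (int b = 0; b < 7; ++b) parity ^= (k >> b) & 1; // parity of the 7 explicit sign bits
        table[k] = (uint8_t)(k | (parity << 7));            // bit 7 keeps the total parity even
    }
}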
  3004. void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k) {
  3005. assert(k % QK_K == 0);
  3006. const int nb = k / QK_K;
  3007. uint32_t aux32[2];
  3008. const uint8_t * aux8 = (const uint8_t *)aux32;
  3009. for (int i = 0; i < nb; i++) {
  3010. const float d = GGML_FP16_TO_FP32(x[i].d);
  3011. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  3012. memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
  3013. const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
  3014. for (int l = 0; l < 4; ++l) {
  3015. const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
  3016. const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
  3017. for (int j = 0; j < 8; ++j) {
  3018. y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  3019. }
  3020. y += 8;
  3021. }
  3022. }
  3023. }
  3024. }
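// Layout note for IQ2_XXS (read off the loop above): each 32-value group occupies 8 bytes of qs,
// viewed as two uint32. The first four bytes are indices into the 256-entry iq2xxs_grid (8 values
// each); the second uint32 packs four 7-bit indices into ksigns_iq2xs (bits 0-27) and a 4-bit
// scale (bits 28-31), giving an effective sub-block scale of d * (0.5 + scale) * 0.25.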
  3025. // ====================== 2.3125 bpw (de)-quantization
  3026. void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int k) {
  3027. assert(k % QK_K == 0);
  3028. const int nb = k / QK_K;
  3029. float db[2];
  3030. for (int i = 0; i < nb; i++) {
  3031. const float d = GGML_FP16_TO_FP32(x[i].d);
  3032. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  3033. db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
  3034. db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
  3035. for (int l = 0; l < 4; ++l) {
  3036. const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
  3037. const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
  3038. for (int j = 0; j < 8; ++j) {
  3039. y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  3040. }
  3041. y += 8;
  3042. }
  3043. }
  3044. }
  3045. }
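// Layout note for IQ2_XS (read off the loop above): qs holds one 16-bit entry per 8 values; its
// low 9 bits index the 512-entry iq2xs_grid and its top 7 bits index ksigns_iq2xs. Each scales
// byte covers a 32-value group: the low nibble scales the first 16 values, the high nibble the
// last 16, with the effective scale d * (0.5 + s) * 0.25.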
  3046. // ====================== 3.0625 bpw (de)-quantization
  3047. void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int k) {
  3048. assert(k % QK_K == 0);
  3049. const int nb = k / QK_K;
  3050. uint32_t aux32;
  3051. for (int i = 0; i < nb; i++) {
  3052. const float d = GGML_FP16_TO_FP32(x[i].d);
  3053. const uint8_t * qs = x[i].qs;
  3054. const uint8_t * scales_and_signs = qs + QK_K/4;
  3055. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  3056. memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
  3057. const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
  3058. for (int l = 0; l < 4; ++l) {
  3059. const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
  3060. const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
  3061. const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
  3062. for (int j = 0; j < 4; ++j) {
  3063. y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
  3064. y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
  3065. }
  3066. y += 8;
  3067. }
  3068. qs += 8;
  3069. }
  3070. }
  3071. }
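// Layout recap: iq3_xxs stores QK_K/4 grid-index bytes first (each byte picks
// 4 magnitudes from iq3xxs_grid), followed by QK_K/8 bytes of packed signs and
// scales. Per 32 values one uint32_t holds four 7-bit ksigns_iq2xs indices in
// its low 28 bits and a 4-bit scale on top, applied as d * (0.5f + scale) * 0.5f;
// each inner iteration consumes two grid bytes and emits 8 values.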
  3072. //===================================== Q8_K ==============================================
  3073. void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
  3074. assert(k % QK_K == 0);
  3075. const int nb = k / QK_K;
  3076. for (int i = 0; i < nb; i++) {
  3077. float max = 0;
  3078. float amax = 0;
  3079. for (int j = 0; j < QK_K; ++j) {
  3080. float ax = fabsf(x[j]);
  3081. if (ax > amax) {
  3082. amax = ax; max = x[j];
  3083. }
  3084. }
  3085. if (!amax) {
  3086. y[i].d = 0;
  3087. memset(y[i].qs, 0, QK_K);
  3088. x += QK_K;
  3089. continue;
  3090. }
3091. //const float iscale = -128.f/max;
3092. // Using -127 instead of -128 is needed for IQ2_XXS; otherwise the AVX implementation becomes very awkward
3093. const float iscale = -127.f/max;
  3094. for (int j = 0; j < QK_K; ++j) {
  3095. int v = nearest_int(iscale*x[j]);
  3096. y[i].qs[j] = MIN(127, v);
  3097. }
  3098. for (int j = 0; j < QK_K/16; ++j) {
  3099. int sum = 0;
  3100. for (int ii = 0; ii < 16; ++ii) {
  3101. sum += y[i].qs[j*16 + ii];
  3102. }
  3103. y[i].bsums[j] = sum;
  3104. }
  3105. y[i].d = 1/iscale;
  3106. x += QK_K;
  3107. }
  3108. }
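// Worked example of the scale choice above: if the largest-magnitude value in
// a block is max = 2.0f, then iscale = -127.f/2.0f = -63.5f and d = 1/iscale =
// -2.0f/127. That extreme value quantizes to nearest_int(-127) = -127 and
// dequantizes back to exactly d * -127 = 2.0f; every other value lands within
// roughly 0.5*|d| of its original. bsums[] caches the sum of each group of 16
// quants so the k-quant dot products below can fold in per-block minima
// without re-summing the q8 values.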
  3109. void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
  3110. assert(k % QK_K == 0);
  3111. const int nb = k / QK_K;
  3112. for (int i = 0; i < nb; i++) {
  3113. for (int j = 0; j < QK_K; ++j) {
  3114. *y++ = x[i].d * x[i].qs[j];
  3115. }
  3116. }
  3117. }
  3118. void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
  3119. quantize_row_q8_K_reference(x, y, k);
  3120. }
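#if 0
// Minimal round-trip sketch, not part of the build: quantize one QK_K-sized
// row to Q8_K and dequantize it back with the routines defined above. The
// test data and the helper name are illustrative only.
static void example_q8_K_roundtrip(void) {
    float src[QK_K], dst[QK_K];
    for (int j = 0; j < QK_K; ++j) {
        src[j] = sinf(0.1f*j); // arbitrary test signal
    }
    block_q8_K blk[1];
    quantize_row_q8_K(src, blk, QK_K);   // one block covers QK_K values
    dequantize_row_q8_K(blk, dst, QK_K); // dst[j] ~= src[j] up to ~|d|/2
}
#endif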
3121. //===================================== Dot products =================================
  3122. //
  3123. // Helper functions
  3124. //
  3125. #if __AVX__ || __AVX2__ || __AVX512F__
  3126. // shuffles to pick the required scales in dot products
  3127. static inline __m256i get_scale_shuffle_q3k(int i) {
  3128. static const uint8_t k_shuffle[128] = {
  3129. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  3130. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  3131. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  3132. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
  3133. };
  3134. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  3135. }
  3136. static inline __m256i get_scale_shuffle_k4(int i) {
  3137. static const uint8_t k_shuffle[256] = {
  3138. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
  3139. 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  3140. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
  3141. 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  3142. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
  3143. 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  3144. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
  3145. 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
  3146. };
  3147. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  3148. }
  3149. static inline __m128i get_scale_shuffle(int i) {
  3150. static const uint8_t k_shuffle[128] = {
  3151. 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
  3152. 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
  3153. 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
  3154. 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
  3155. 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
  3156. 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
  3157. 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
  3158. 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
  3159. };
  3160. return _mm_loadu_si128((const __m128i*)k_shuffle + i);
  3161. }
  3162. #endif
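// The tables above are shuffle masks for _mm256_shuffle_epi8/_mm_shuffle_epi8:
// they broadcast the per-sub-block scales across whole SIMD lanes so a single
// madd can apply a different scale to each group of products.
// get_scale_shuffle_q3k(i) replicates 16-bit scales 2i and 2i+1 into the two
// 128-bit halves, get_scale_shuffle_k4(i) replicates the i-th 16-bit scale
// across all 32 bytes, and get_scale_shuffle(i) replicates 8-bit scales 2i and
// 2i+1 across an __m128i.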
  3163. void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  3164. const int qk = QK8_0;
  3165. const int nb = n / qk;
  3166. assert(n % qk == 0);
  3167. const block_q4_0 * restrict x = vx;
  3168. const block_q8_0 * restrict y = vy;
  3169. #if defined(__ARM_NEON)
  3170. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3171. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3172. assert(nb % 2 == 0); // TODO: handle odd nb
  3173. for (int i = 0; i < nb; i += 2) {
  3174. const block_q4_0 * restrict x0 = &x[i + 0];
  3175. const block_q4_0 * restrict x1 = &x[i + 1];
  3176. const block_q8_0 * restrict y0 = &y[i + 0];
  3177. const block_q8_0 * restrict y1 = &y[i + 1];
  3178. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3179. const int8x16_t s8b = vdupq_n_s8(0x8);
  3180. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3181. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3182. // 4-bit -> 8-bit
  3183. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3184. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3185. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3186. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3187. // sub 8
  3188. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  3189. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  3190. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  3191. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  3192. // load y
  3193. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3194. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3195. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3196. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3197. // dot product into int32x4_t
  3198. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
  3199. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
  3200. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3201. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3202. }
  3203. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3204. #elif defined(__AVX2__)
  3205. // Initialize accumulator with zeros
  3206. __m256 acc = _mm256_setzero_ps();
  3207. // Main loop
  3208. for (int i = 0; i < nb; ++i) {
  3209. /* Compute combined scale for the block */
  3210. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3211. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  3212. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  3213. const __m256i off = _mm256_set1_epi8( 8 );
  3214. bx = _mm256_sub_epi8( bx, off );
  3215. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3216. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  3217. /* Multiply q with scale and accumulate */
  3218. acc = _mm256_fmadd_ps( d, q, acc );
  3219. }
  3220. *s = hsum_float_8(acc);
  3221. #elif defined(__AVX__)
  3222. // Initialize accumulator with zeros
  3223. __m256 acc = _mm256_setzero_ps();
  3224. // Main loop
  3225. for (int i = 0; i < nb; ++i) {
  3226. // Compute combined scale for the block
  3227. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3228. const __m128i lowMask = _mm_set1_epi8(0xF);
  3229. const __m128i off = _mm_set1_epi8(8);
  3230. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  3231. __m128i bx = _mm_and_si128(lowMask, tmp);
  3232. __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
  3233. bx = _mm_sub_epi8(bx, off);
  3234. const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
  3235. bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  3236. by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3237. bx = _mm_sub_epi8(bx, off);
  3238. const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
  3239. // Convert int32_t to float
  3240. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  3241. // Apply the scale, and accumulate
  3242. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  3243. }
  3244. *s = hsum_float_8(acc);
  3245. #elif defined(__SSSE3__)
  3246. // set constants
  3247. const __m128i lowMask = _mm_set1_epi8(0xF);
  3248. const __m128i off = _mm_set1_epi8(8);
  3249. // Initialize accumulator with zeros
  3250. __m128 acc_0 = _mm_setzero_ps();
  3251. __m128 acc_1 = _mm_setzero_ps();
  3252. __m128 acc_2 = _mm_setzero_ps();
  3253. __m128 acc_3 = _mm_setzero_ps();
  3254. // First round without accumulation
  3255. {
  3256. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  3257. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
  3258. // Compute combined scale for the block 0 and 1
  3259. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  3260. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  3261. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3262. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  3263. bx_0 = _mm_sub_epi8(bx_0, off);
  3264. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3265. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3266. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  3267. bx_1 = _mm_sub_epi8(bx_1, off);
  3268. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3269. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  3270. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
  3271. // Compute combined scale for the block 2 and 3
  3272. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  3273. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  3274. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3275. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  3276. bx_2 = _mm_sub_epi8(bx_2, off);
  3277. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  3278. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  3279. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  3280. bx_3 = _mm_sub_epi8(bx_3, off);
  3281. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  3282. // Convert int32_t to float
  3283. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  3284. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  3285. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  3286. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  3287. // Apply the scale
  3288. acc_0 = _mm_mul_ps( d_0_1, p0 );
  3289. acc_1 = _mm_mul_ps( d_0_1, p1 );
  3290. acc_2 = _mm_mul_ps( d_2_3, p2 );
  3291. acc_3 = _mm_mul_ps( d_2_3, p3 );
  3292. }
  3293. assert(nb % 2 == 0); // TODO: handle odd nb
  3294. // Main loop
  3295. for (int i = 2; i < nb; i+=2) {
  3296. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  3297. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
  3298. // Compute combined scale for the block 0 and 1
  3299. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3300. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  3301. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3302. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  3303. bx_0 = _mm_sub_epi8(bx_0, off);
  3304. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3305. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3306. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3307. bx_1 = _mm_sub_epi8(bx_1, off);
  3308. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3309. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  3310. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
  3311. // Compute combined scale for the block 2 and 3
  3312. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  3313. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  3314. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3315. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  3316. bx_2 = _mm_sub_epi8(bx_2, off);
  3317. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  3318. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  3319. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  3320. bx_3 = _mm_sub_epi8(bx_3, off);
  3321. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  3322. // Convert int32_t to float
  3323. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  3324. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  3325. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  3326. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  3327. // Apply the scale
  3328. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  3329. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  3330. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  3331. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
3332. // Accumulate
  3333. acc_0 = _mm_add_ps(p0_d, acc_0);
  3334. acc_1 = _mm_add_ps(p1_d, acc_1);
  3335. acc_2 = _mm_add_ps(p2_d, acc_2);
  3336. acc_3 = _mm_add_ps(p3_d, acc_3);
  3337. }
  3338. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  3339. #elif defined(__riscv_v_intrinsic)
  3340. float sumf = 0.0;
  3341. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3342. for (int i = 0; i < nb; i++) {
  3343. // load elements
  3344. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3345. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3346. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3347. // mask and store lower part of x, and then upper part
  3348. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3349. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3350. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3351. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3352. // subtract offset
  3353. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
  3354. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
  3355. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3356. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3357. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3358. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3359. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3360. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3361. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  3362. }
  3363. *s = sumf;
  3364. #else
  3365. // scalar
  3366. float sumf = 0.0;
  3367. for (int i = 0; i < nb; i++) {
  3368. int sumi = 0;
  3369. for (int j = 0; j < qk/2; ++j) {
  3370. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  3371. const int v1 = (x[i].qs[j] >> 4) - 8;
  3372. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  3373. }
  3374. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  3375. }
  3376. *s = sumf;
  3377. #endif
  3378. }
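#if 0
// Minimal usage sketch, not part of the build: quantize two float rows with
// quantize_row_q4_0/quantize_row_q8_0 (defined earlier in this file) and take
// their dot product. n is kept a multiple of 64 because the NEON and SSSE3
// paths above assume an even number of blocks. The helper name is illustrative.
static float example_dot_q4_0_q8_0(const float * a, const float * b, int n) {
    block_q4_0 qa[n/QK4_0]; // n/32 blocks
    block_q8_0 qb[n/QK8_0];
    quantize_row_q4_0(a, qa, n);
    quantize_row_q8_0(b, qb, n);
    float s = 0.0f;
    ggml_vec_dot_q4_0_q8_0(n, &s, qa, qb);
    return s; // approximates sum_j a[j]*b[j]
}
#endif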
  3379. void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  3380. const int qk = QK8_1;
  3381. const int nb = n / qk;
  3382. assert(n % qk == 0);
  3383. const block_q4_1 * restrict x = vx;
  3384. const block_q8_1 * restrict y = vy;
  3385. // TODO: add WASM SIMD
  3386. #if defined(__ARM_NEON)
  3387. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3388. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3389. float summs = 0;
  3390. assert(nb % 2 == 0); // TODO: handle odd nb
  3391. for (int i = 0; i < nb; i += 2) {
  3392. const block_q4_1 * restrict x0 = &x[i + 0];
  3393. const block_q4_1 * restrict x1 = &x[i + 1];
  3394. const block_q8_1 * restrict y0 = &y[i + 0];
  3395. const block_q8_1 * restrict y1 = &y[i + 1];
  3396. summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
  3397. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3398. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3399. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3400. // 4-bit -> 8-bit
  3401. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3402. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3403. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3404. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3405. // load y
  3406. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3407. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3408. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3409. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3410. // dot product into int32x4_t
  3411. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
  3412. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
  3413. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
  3414. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
  3415. }
  3416. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
  3417. #elif defined(__AVX2__) || defined(__AVX__)
  3418. // Initialize accumulator with zeros
  3419. __m256 acc = _mm256_setzero_ps();
  3420. float summs = 0;
  3421. // Main loop
  3422. for (int i = 0; i < nb; ++i) {
  3423. const float d0 = GGML_FP16_TO_FP32(x[i].d);
  3424. const float d1 = y[i].d;
  3425. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  3426. const __m256 d0v = _mm256_set1_ps( d0 );
  3427. const __m256 d1v = _mm256_set1_ps( d1 );
  3428. // Compute combined scales
  3429. const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
  3430. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  3431. const __m256i bx = bytes_from_nibbles_32(x[i].qs);
  3432. const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
  3433. const __m256 xy = mul_sum_us8_pairs_float(bx, by);
  3434. // Accumulate d0*d1*x*y
  3435. #if defined(__AVX2__)
  3436. acc = _mm256_fmadd_ps( d0d1, xy, acc );
  3437. #else
  3438. acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
  3439. #endif
  3440. }
  3441. *s = hsum_float_8(acc) + summs;
  3442. #elif defined(__riscv_v_intrinsic)
  3443. float sumf = 0.0;
  3444. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3445. for (int i = 0; i < nb; i++) {
  3446. // load elements
  3447. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3448. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3449. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3450. // mask and store lower part of x, and then upper part
  3451. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3452. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3453. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3454. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3455. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3456. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3457. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3458. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3459. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3460. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3461. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  3462. }
  3463. *s = sumf;
  3464. #else
  3465. // scalar
  3466. float sumf = 0.0;
  3467. for (int i = 0; i < nb; i++) {
  3468. int sumi = 0;
  3469. for (int j = 0; j < qk/2; ++j) {
  3470. const int v0 = (x[i].qs[j] & 0x0F);
  3471. const int v1 = (x[i].qs[j] >> 4);
  3472. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  3473. }
  3474. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  3475. }
  3476. *s = sumf;
  3477. #endif
  3478. }
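// The summs term above implements the Q4_1 affine correction: with
// x_j = d_x*q_x + m_x and y_j = d_y*q_y, a block contributes
// d_x*d_y*sum(q_x*q_y) + m_x*(d_y*sum(q_y)), and y[i].s caches d_y*sum(q_y),
// so the second term costs one multiply-add per block.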
  3479. void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  3480. const int qk = QK8_0;
  3481. const int nb = n / qk;
  3482. assert(n % qk == 0);
  3483. assert(qk == QK5_0);
  3484. const block_q5_0 * restrict x = vx;
  3485. const block_q8_0 * restrict y = vy;
  3486. #if defined(__ARM_NEON)
  3487. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3488. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3489. uint32_t qh0;
  3490. uint32_t qh1;
  3491. uint64_t tmp0[4];
  3492. uint64_t tmp1[4];
  3493. assert(nb % 2 == 0); // TODO: handle odd nb
  3494. for (int i = 0; i < nb; i += 2) {
  3495. const block_q5_0 * restrict x0 = &x[i];
  3496. const block_q5_0 * restrict x1 = &x[i + 1];
  3497. const block_q8_0 * restrict y0 = &y[i];
  3498. const block_q8_0 * restrict y1 = &y[i + 1];
  3499. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3500. // extract the 5th bit via lookup table ((!b) << 4)
  3501. memcpy(&qh0, x0->qh, sizeof(qh0));
  3502. memcpy(&qh1, x1->qh, sizeof(qh1));
  3503. tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
  3504. tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
  3505. tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
  3506. tmp0[3] = table_b2b_1[(qh0 >> 24) ];
  3507. tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
  3508. tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
  3509. tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
  3510. tmp1[3] = table_b2b_1[(qh1 >> 24) ];
  3511. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  3512. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  3513. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  3514. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  3515. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3516. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3517. // 4-bit -> 8-bit
  3518. int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3519. int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3520. int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3521. int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3522. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  3523. const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
  3524. const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
  3525. const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
  3526. const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
  3527. // load y
  3528. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3529. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3530. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3531. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3532. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3533. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  3534. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3535. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3536. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  3537. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3538. }
  3539. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3540. #elif defined(__wasm_simd128__)
  3541. v128_t sumv = wasm_f32x4_splat(0.0f);
  3542. uint32_t qh;
  3543. uint64_t tmp[4];
  3544. // TODO: check if unrolling this is better
  3545. for (int i = 0; i < nb; ++i) {
  3546. const block_q5_0 * restrict x0 = &x[i];
  3547. const block_q8_0 * restrict y0 = &y[i];
  3548. const v128_t m4b = wasm_i8x16_splat(0x0F);
  3549. // extract the 5th bit
  3550. memcpy(&qh, x0->qh, sizeof(qh));
  3551. tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
  3552. tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
  3553. tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
  3554. tmp[3] = table_b2b_1[(qh >> 24) ];
  3555. const v128_t qhl = wasm_v128_load(tmp + 0);
  3556. const v128_t qhh = wasm_v128_load(tmp + 2);
  3557. const v128_t v0 = wasm_v128_load(x0->qs);
  3558. // 4-bit -> 8-bit
  3559. const v128_t v0l = wasm_v128_and (v0, m4b);
  3560. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  3561. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  3562. const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
  3563. const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
  3564. // load y
  3565. const v128_t v1l = wasm_v128_load(y0->qs);
  3566. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  3567. // int8x16 -> int16x8
  3568. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  3569. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  3570. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  3571. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  3572. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  3573. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  3574. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  3575. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  3576. // dot product
  3577. sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
  3578. wasm_i32x4_add(
  3579. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  3580. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  3581. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  3582. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  3583. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  3584. }
  3585. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  3586. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
  3587. #elif defined(__AVX2__)
  3588. // Initialize accumulator with zeros
  3589. __m256 acc = _mm256_setzero_ps();
  3590. // Main loop
  3591. for (int i = 0; i < nb; i++) {
  3592. /* Compute combined scale for the block */
  3593. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3594. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  3595. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3596. bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
  3597. bx = _mm256_or_si256(bx, bxhi);
  3598. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3599. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  3600. /* Multiply q with scale and accumulate */
  3601. acc = _mm256_fmadd_ps(d, q, acc);
  3602. }
  3603. *s = hsum_float_8(acc);
  3604. #elif defined(__AVX__)
  3605. // Initialize accumulator with zeros
  3606. __m256 acc = _mm256_setzero_ps();
  3607. __m128i mask = _mm_set1_epi8((char)0xF0);
  3608. // Main loop
  3609. for (int i = 0; i < nb; i++) {
  3610. /* Compute combined scale for the block */
  3611. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3612. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  3613. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3614. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  3615. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  3616. bxhil = _mm_andnot_si128(bxhil, mask);
  3617. bxhih = _mm_andnot_si128(bxhih, mask);
  3618. __m128i bxl = _mm256_castsi256_si128(bx);
  3619. __m128i bxh = _mm256_extractf128_si256(bx, 1);
  3620. bxl = _mm_or_si128(bxl, bxhil);
  3621. bxh = _mm_or_si128(bxh, bxhih);
  3622. bx = MM256_SET_M128I(bxh, bxl);
  3623. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3624. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  3625. /* Multiply q with scale and accumulate */
  3626. acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
  3627. }
  3628. *s = hsum_float_8(acc);
  3629. #elif defined(__riscv_v_intrinsic)
  3630. float sumf = 0.0;
  3631. uint32_t qh;
  3632. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3633. // These temporary registers are for masking and shift operations
  3634. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  3635. vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
  3636. vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
  3637. vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  3638. for (int i = 0; i < nb; i++) {
  3639. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  3640. // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  3641. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
  3642. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
  3643. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  3644. // ((qh & (1u << (j + 16))) >> (j + 12));
  3645. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
  3646. vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
  3647. // narrowing
  3648. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
  3649. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  3650. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
  3651. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  3652. // load
  3653. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3654. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3655. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3656. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3657. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3658. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  3659. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  3660. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3661. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3662. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
  3663. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
  3664. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3665. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3666. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3667. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3668. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3669. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3670. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  3671. }
  3672. *s = sumf;
  3673. #else
  3674. // scalar
  3675. float sumf = 0.0;
  3676. for (int i = 0; i < nb; i++) {
  3677. uint32_t qh;
  3678. memcpy(&qh, x[i].qh, sizeof(qh));
  3679. int sumi = 0;
  3680. for (int j = 0; j < qk/2; ++j) {
  3681. const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  3682. const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
  3683. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  3684. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  3685. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  3686. }
  3687. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  3688. }
  3689. *s = sumf;
  3690. #endif
  3691. }
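// High-bit handling recap: qh packs the 32 fifth bits of a Q5_0 block. The
// scalar path rebuilds x0 = ((qs & 0xF) | xh_0) - 16; the NEON path reaches
// the same result through table_b2b_1, which expands each qh byte into 8 bytes
// of value ((!b) << 4), so a single vector subtract both injects the high bit
// and applies the -16 offset (nibble - 16*(1-b) == (nibble | b<<4) - 16).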
  3692. void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  3693. const int qk = QK8_1;
  3694. const int nb = n / qk;
  3695. assert(n % qk == 0);
  3696. assert(qk == QK5_1);
  3697. const block_q5_1 * restrict x = vx;
  3698. const block_q8_1 * restrict y = vy;
  3699. #if defined(__ARM_NEON)
  3700. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3701. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3702. float summs0 = 0.0f;
  3703. float summs1 = 0.0f;
  3704. uint32_t qh0;
  3705. uint32_t qh1;
  3706. uint64_t tmp0[4];
  3707. uint64_t tmp1[4];
  3708. assert(nb % 2 == 0); // TODO: handle odd nb
  3709. for (int i = 0; i < nb; i += 2) {
  3710. const block_q5_1 * restrict x0 = &x[i];
  3711. const block_q5_1 * restrict x1 = &x[i + 1];
  3712. const block_q8_1 * restrict y0 = &y[i];
  3713. const block_q8_1 * restrict y1 = &y[i + 1];
  3714. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3715. summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
  3716. summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
  3717. // extract the 5th bit via lookup table ((b) << 4)
  3718. memcpy(&qh0, x0->qh, sizeof(qh0));
  3719. memcpy(&qh1, x1->qh, sizeof(qh1));
  3720. tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
  3721. tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
  3722. tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
  3723. tmp0[3] = table_b2b_0[(qh0 >> 24) ];
  3724. tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
  3725. tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
  3726. tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
  3727. tmp1[3] = table_b2b_0[(qh1 >> 24) ];
  3728. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  3729. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  3730. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  3731. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  3732. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3733. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3734. // 4-bit -> 8-bit
  3735. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3736. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3737. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3738. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3739. // add high bit
  3740. const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
  3741. const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
  3742. const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
  3743. const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
  3744. // load y
  3745. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3746. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3747. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3748. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3749. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3750. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  3751. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
  3752. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3753. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  3754. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
  3755. }
  3756. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
  3757. #elif defined(__wasm_simd128__)
  3758. v128_t sumv = wasm_f32x4_splat(0.0f);
  3759. float summs = 0.0f;
  3760. uint32_t qh;
  3761. uint64_t tmp[4];
  3762. // TODO: check if unrolling this is better
  3763. for (int i = 0; i < nb; ++i) {
  3764. const block_q5_1 * restrict x0 = &x[i];
  3765. const block_q8_1 * restrict y0 = &y[i];
  3766. summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
  3767. const v128_t m4b = wasm_i8x16_splat(0x0F);
  3768. // extract the 5th bit
  3769. memcpy(&qh, x0->qh, sizeof(qh));
  3770. tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
  3771. tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
  3772. tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
  3773. tmp[3] = table_b2b_0[(qh >> 24) ];
  3774. const v128_t qhl = wasm_v128_load(tmp + 0);
  3775. const v128_t qhh = wasm_v128_load(tmp + 2);
  3776. const v128_t v0 = wasm_v128_load(x0->qs);
  3777. // 4-bit -> 8-bit
  3778. const v128_t v0l = wasm_v128_and (v0, m4b);
  3779. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  3780. // add high bit
  3781. const v128_t v0lf = wasm_v128_or(v0l, qhl);
  3782. const v128_t v0hf = wasm_v128_or(v0h, qhh);
  3783. // load y
  3784. const v128_t v1l = wasm_v128_load(y0->qs);
  3785. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  3786. // int8x16 -> int16x8
  3787. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  3788. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  3789. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  3790. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  3791. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  3792. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  3793. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  3794. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  3795. // dot product
  3796. sumv = wasm_f32x4_add(sumv,
  3797. wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
  3798. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  3799. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  3800. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  3801. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  3802. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
  3803. }
  3804. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  3805. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
  3806. #elif defined(__AVX2__)
  3807. // Initialize accumulator with zeros
  3808. __m256 acc = _mm256_setzero_ps();
  3809. float summs = 0.0f;
  3810. // Main loop
  3811. for (int i = 0; i < nb; i++) {
  3812. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  3813. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  3814. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  3815. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3816. bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
  3817. bx = _mm256_or_si256(bx, bxhi);
  3818. const __m256 dy = _mm256_set1_ps(y[i].d);
  3819. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3820. const __m256 q = mul_sum_us8_pairs_float(bx, by);
  3821. acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
  3822. }
  3823. *s = hsum_float_8(acc) + summs;
  3824. #elif defined(__AVX__)
  3825. // Initialize accumulator with zeros
  3826. __m256 acc = _mm256_setzero_ps();
  3827. __m128i mask = _mm_set1_epi8(0x10);
  3828. float summs = 0.0f;
  3829. // Main loop
  3830. for (int i = 0; i < nb; i++) {
  3831. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  3832. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  3833. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  3834. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3835. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  3836. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  3837. bxhil = _mm_and_si128(bxhil, mask);
  3838. bxhih = _mm_and_si128(bxhih, mask);
  3839. __m128i bxl = _mm256_castsi256_si128(bx);
  3840. __m128i bxh = _mm256_extractf128_si256(bx, 1);
  3841. bxl = _mm_or_si128(bxl, bxhil);
  3842. bxh = _mm_or_si128(bxh, bxhih);
  3843. bx = MM256_SET_M128I(bxh, bxl);
  3844. const __m256 dy = _mm256_set1_ps(y[i].d);
  3845. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3846. const __m256 q = mul_sum_us8_pairs_float(bx, by);
  3847. acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
  3848. }
  3849. *s = hsum_float_8(acc) + summs;
  3850. #elif defined(__riscv_v_intrinsic)
  3851. float sumf = 0.0;
  3852. uint32_t qh;
  3853. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3854. // temporary registers for shift operations
  3855. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  3856. vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  3857. for (int i = 0; i < nb; i++) {
  3858. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  3859. // load qh
  3860. vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
  3861. // ((qh >> (j + 0)) << 4) & 0x10;
  3862. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
  3863. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  3864. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
  3865. // ((qh >> (j + 12)) ) & 0x10;
  3866. vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
  3867. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
  3868. // narrowing
  3869. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
  3870. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  3871. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
  3872. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  3873. // load
  3874. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3875. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3876. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3877. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3878. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3879. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  3880. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  3881. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3882. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3883. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3884. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3885. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3886. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3887. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3888. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3889. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  3890. }
  3891. *s = sumf;
  3892. #else
  3893. // scalar
  3894. float sumf = 0.0;
  3895. for (int i = 0; i < nb; i++) {
  3896. uint32_t qh;
  3897. memcpy(&qh, x[i].qh, sizeof(qh));
  3898. int sumi = 0;
  3899. for (int j = 0; j < qk/2; ++j) {
  3900. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  3901. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  3902. const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
  3903. const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
  3904. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  3905. }
  3906. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  3907. }
  3908. *s = sumf;
  3909. #endif
  3910. }
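// Q5_1 differs from Q5_0 above in two ways: the 5-bit quant stays unsigned
// (table_b2b_0 yields (b << 4) and the high bit is OR-ed in, with no -16
// offset), and the per-block minimum is handled like Q4_1 through the summs
// accumulator, i.e. d_x*d_y*sum(q_x*q_y) + m_x*s_y per block.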
  3911. void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  3912. const int qk = QK8_0;
  3913. const int nb = n / qk;
  3914. assert(n % qk == 0);
  3915. const block_q8_0 * restrict x = vx;
  3916. const block_q8_0 * restrict y = vy;
  3917. #if defined(__ARM_NEON)
  3918. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3919. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3920. assert(nb % 2 == 0); // TODO: handle odd nb
  3921. for (int i = 0; i < nb; i += 2) {
  3922. const block_q8_0 * restrict x0 = &x[i + 0];
  3923. const block_q8_0 * restrict x1 = &x[i + 1];
  3924. const block_q8_0 * restrict y0 = &y[i + 0];
  3925. const block_q8_0 * restrict y1 = &y[i + 1];
  3926. const int8x16_t x0_0 = vld1q_s8(x0->qs);
  3927. const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
  3928. const int8x16_t x1_0 = vld1q_s8(x1->qs);
  3929. const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
  3930. // load y
  3931. const int8x16_t y0_0 = vld1q_s8(y0->qs);
  3932. const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
  3933. const int8x16_t y1_0 = vld1q_s8(y1->qs);
  3934. const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
  3935. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3936. ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
  3937. ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3938. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3939. ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
  3940. ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3941. }
  3942. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3943. #elif defined(__AVX2__) || defined(__AVX__)
  3944. // Initialize accumulator with zeros
  3945. __m256 acc = _mm256_setzero_ps();
  3946. // Main loop
  3947. for (int i = 0; i < nb; ++i) {
  3948. // Compute combined scale for the block
  3949. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3950. __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
  3951. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3952. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  3953. // Multiply q with scale and accumulate
  3954. #if defined(__AVX2__)
  3955. acc = _mm256_fmadd_ps( d, q, acc );
  3956. #else
  3957. acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
  3958. #endif
  3959. }
  3960. *s = hsum_float_8(acc);
  3961. #elif defined(__riscv_v_intrinsic)
  3962. float sumf = 0.0;
  3963. size_t vl = __riscv_vsetvl_e8m1(qk);
  3964. for (int i = 0; i < nb; i++) {
  3965. // load elements
  3966. vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
  3967. vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
  3968. vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
  3969. vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3970. vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
  3971. int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
  3972. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  3973. }
  3974. *s = sumf;
  3975. #else
  3976. // scalar
  3977. float sumf = 0.0;
  3978. for (int i = 0; i < nb; i++) {
  3979. int sumi = 0;
  3980. for (int j = 0; j < qk; j++) {
  3981. sumi += x[i].qs[j]*y[i].qs[j];
  3982. }
  3983. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  3984. }
  3985. *s = sumf;
  3986. #endif
  3987. }
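// Q8_0 x Q8_0 is the simplest dot product here: each block contributes
// d_x * d_y * sum_j(qx[j]*qy[j]), with no offsets or high bits to restore, so
// every path reduces to an int8 multiply-accumulate followed by one scaling.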
  3988. #if QK_K == 256
  3989. void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  3990. const block_q2_K * restrict x = vx;
  3991. const block_q8_K * restrict y = vy;
  3992. const int nb = n / QK_K;
  3993. #ifdef __ARM_NEON
  3994. const uint8x16_t m3 = vdupq_n_u8(0x3);
  3995. const uint8x16_t m4 = vdupq_n_u8(0xF);
  3996. const int32x4_t vzero = vdupq_n_s32(0);
  3997. ggml_int8x16x2_t q2bytes;
  3998. uint8_t aux[16];
  3999. float sum = 0;
  4000. for (int i = 0; i < nb; ++i) {
  4001. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4002. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4003. const uint8_t * restrict q2 = x[i].qs;
  4004. const int8_t * restrict q8 = y[i].qs;
  4005. const uint8_t * restrict sc = x[i].scales;
  4006. const uint8x16_t mins_and_scales = vld1q_u8(sc);
  4007. const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
  4008. vst1q_u8(aux, scales);
  4009. const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
  4010. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  4011. const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
  4012. const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
  4013. vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
  4014. const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
  4015. vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
  4016. sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
  4017. int isum = 0;
  4018. int is = 0;
  4019. // We use this macro instead of a function call because for some reason
  4020. // the code runs 2-3% slower, even if the function is declared inline
  4021. #define MULTIPLY_ACCUM_WITH_SCALE(index)\
  4022. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
  4023. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
  4024. #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
  4025. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
  4026. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
  4027. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
  4028. MULTIPLY_ACCUM_WITH_SCALE((index));
  4029. for (int j = 0; j < QK_K/128; ++j) {
  4030. const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
  4031. ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  4032. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
  4033. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
  4034. MULTIPLY_ACCUM_WITH_SCALE(0);
  4035. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
  4036. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
  4037. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
  4038. is += 8;
  4039. }
  4040. sum += d * isum;
  4041. }
  4042. *s = sum;
  4043. #elif defined __AVX2__
  4044. const __m256i m3 = _mm256_set1_epi8(3);
  4045. const __m128i m4 = _mm_set1_epi8(0xF);
  4046. __m256 acc = _mm256_setzero_ps();
  4047. for (int i = 0; i < nb; ++i) {
  4048. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4049. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4050. const uint8_t * restrict q2 = x[i].qs;
  4051. const int8_t * restrict q8 = y[i].qs;
  4052. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4053. const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
  4054. const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4055. const __m256i mins = _mm256_cvtepi8_epi16(mins8);
  4056. const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
  4057. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
  4058. const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
  4059. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4060. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4061. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
  4062. __m256i sumi = _mm256_setzero_si256();
  4063. for (int j = 0; j < QK_K/128; ++j) {
  4064. const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
  4065. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4066. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4067. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4068. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4069. const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
  4070. const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
  4071. const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
  4072. const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
  4073. __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4074. __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4075. __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
  4076. __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
  4077. p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
  4078. p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
  4079. p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
  4080. p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
  4081. p0 = _mm256_add_epi32(p0, p1);
  4082. p2 = _mm256_add_epi32(p2, p3);
  4083. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
  4084. }
  4085. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4086. }
  4087. *s = hsum_float_8(acc);
  4088. #elif defined __AVX__
  4089. const __m128i m3 = _mm_set1_epi8(0x3);
  4090. const __m128i m4 = _mm_set1_epi8(0xF);
  4091. const __m128i m2 = _mm_set1_epi8(0x2);
  4092. __m256 acc = _mm256_setzero_ps();
  4093. for (int i = 0; i < nb; ++i) {
  4094. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4095. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4096. const uint8_t * restrict q2 = x[i].qs;
  4097. const int8_t * restrict q8 = y[i].qs;
  4098. // load mins and scales from block_q2_K.scales[QK_K/16]
  4099. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4100. const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
  4101. const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4102. const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
  4103. const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
  4104. // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
  4105. const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
  4106. const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
  4107. // sumf += -dmin * summs in 32bits*8
  4108. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
  4109. const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
  4110. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
  4111. const __m128i scales[2] = { scales_0, scales_1 };
  4112. __m128i sumi_0 = _mm_setzero_si128();
  4113. __m128i sumi_1 = _mm_setzero_si128();
  4114. for (int j = 0; j < QK_K/128; ++j) {
  4115. // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
  4116. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4117. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4118. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4119. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4120. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4121. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4122. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4123. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4124. // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
  4125. __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4126. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4127. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4128. const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4129. const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4130. q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4131. const __m128i q2_1 = _mm_and_si128(q2bits, m3);
  4132. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4133. const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4134. const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4135. // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
  4136. __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
  4137. __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
  4138. __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
  4139. __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
  4140. __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
  4141. __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
  4142. __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
  4143. __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
  4144. // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
  4145. __m128i shuffle = _mm_set1_epi16(0x0100);
  4146. p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
  4147. shuffle = _mm_add_epi16(shuffle, m2);
  4148. p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
  4149. shuffle = _mm_add_epi16(shuffle, m2);
  4150. p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
  4151. shuffle = _mm_add_epi16(shuffle, m2);
  4152. p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
  4153. shuffle = _mm_add_epi16(shuffle, m2);
  4154. p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
  4155. shuffle = _mm_add_epi16(shuffle, m2);
  4156. p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
  4157. shuffle = _mm_add_epi16(shuffle, m2);
  4158. p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
  4159. shuffle = _mm_add_epi16(shuffle, m2);
  4160. p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
  4161. p0 = _mm_add_epi32(p0, p1);
  4162. p2 = _mm_add_epi32(p2, p3);
  4163. p4 = _mm_add_epi32(p4, p5);
  4164. p6 = _mm_add_epi32(p6, p7);
  4165. // isum in 32bits*4*2
  4166. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
  4167. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
  4168. }
  4169. // sumf += dall * isum - dmin * summs in 32bits
  4170. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  4171. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
  4172. }
  4173. *s = hsum_float_8(acc);
  4174. #elif defined __riscv_v_intrinsic
  4175. float sumf = 0;
  4176. uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  4177. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  4178. for (int i = 0; i < nb; ++i) {
  4179. const uint8_t * q2 = x[i].qs;
  4180. const int8_t * q8 = y[i].qs;
  4181. const uint8_t * sc = x[i].scales;
  4182. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4183. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4184. size_t vl = 16;
  4185. vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
  4186. vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
  4187. vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
  4188. vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
  4189. vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
  4190. vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
  4191. vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
  4192. vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  4193. sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
  4194. vl = 32;
  4195. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  4196. vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
  4197. uint8_t is=0;
  4198. int isum=0;
  4199. for (int j = 0; j < QK_K/128; ++j) {
  4200. // load Q2
  4201. vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
  4202. vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
  4203. vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
  4204. vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
  4205. vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
  4206. // duplicate scale elements for product
  4207. vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
  4208. vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
  4209. vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
  4210. vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
  4211. vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
  4212. vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
  4213. vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
  4214. vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
  4215. // load Q8
  4216. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  4217. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  4218. vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
  4219. vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
  4220. vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
  4221. vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
  4222. vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
  4223. vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
  4224. vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
  4225. vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
  4226. isum += __riscv_vmv_x_s_i32m1_i32(isum1);
  4227. q2+=32; q8+=128; is=8;
  4228. }
  4229. sumf += dall * isum;
  4230. }
  4231. *s = sumf;
  4232. #else
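// Reference scalar path: summs collects the per-sub-block mins (scale high nibbles) against y[i].bsums,
// and the nested loops walk the four 2-bit planes 16 values at a time, weighting each group of 16 by the
// low nibble of its scale byte, so the final sumf = dall*isum - dmin*summs matches the SIMD paths above.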
  4233. float sumf = 0;
  4234. for (int i = 0; i < nb; ++i) {
  4235. const uint8_t * q2 = x[i].qs;
  4236. const int8_t * q8 = y[i].qs;
  4237. const uint8_t * sc = x[i].scales;
  4238. int summs = 0;
  4239. for (int j = 0; j < 16; ++j) {
  4240. summs += y[i].bsums[j] * (sc[j] >> 4);
  4241. }
  4242. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4243. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4244. int isum = 0;
  4245. int is = 0;
  4246. int d;
  4247. for (int k = 0; k < QK_K/128; ++k) {
  4248. int shift = 0;
  4249. for (int j = 0; j < 4; ++j) {
  4250. d = sc[is++] & 0xF;
  4251. int isuml = 0;
  4252. for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
  4253. isum += d * isuml;
  4254. d = sc[is++] & 0xF;
  4255. isuml = 0;
  4256. for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
  4257. isum += d * isuml;
  4258. shift += 2;
  4259. q8 += 32;
  4260. }
  4261. q2 += 32;
  4262. }
  4263. sumf += dall * isum - dmin * summs;
  4264. }
  4265. *s = sumf;
  4266. #endif
  4267. }
  4268. #else
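// Variant of ggml_vec_dot_q2_K_q8_K for the small super-block build (QK_K == 64): each block carries a
// single 16-byte qs[] array (64 2-bit quants) and four scale bytes whose low nibbles are the sub-block
// scales and high nibbles the mins, so every block is handled in one pass without the 128-quant loop.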
  4269. void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  4270. const block_q2_K * restrict x = vx;
  4271. const block_q8_K * restrict y = vy;
  4272. const int nb = n / QK_K;
  4273. #ifdef __ARM_NEON
  4274. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4275. const int32x4_t vzero = vdupq_n_s32(0);
  4276. ggml_int8x16x4_t q2bytes;
  4277. uint32_t aux32[2];
  4278. const uint8_t * scales = (const uint8_t *)aux32;
  4279. float sum = 0;
  4280. for (int i = 0; i < nb; ++i) {
  4281. const float d = y[i].d * (float)x[i].d;
  4282. const float dmin = -y[i].d * (float)x[i].dmin;
  4283. const uint8_t * restrict q2 = x[i].qs;
  4284. const int8_t * restrict q8 = y[i].qs;
  4285. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4286. aux32[0] = sc[0] & 0x0f0f0f0f;
  4287. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  4288. sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  4289. int isum1 = 0, isum2 = 0;
  4290. const uint8x16_t q2bits = vld1q_u8(q2);
  4291. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  4292. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
  4293. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
  4294. q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
  4295. q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
  4296. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
  4297. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
  4298. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
  4299. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
  4300. sum += d * (isum1 + isum2);
  4301. }
  4302. *s = sum;
  4303. #elif defined __AVX2__
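// AVX2 path: the 16 quant bytes are expanded into two 256-bit registers holding the shift-0/2 and
// shift-4/6 planes and multiplied against q8 with maddubs; each 16-quant sub-block is then widened to
// 32-bit lanes so it can be scaled by its own low-nibble scale db[0..3] directly in float, while the
// mins are folded in through y[i].bsums and dmin.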
  4304. const __m256i m3 = _mm256_set1_epi8(3);
  4305. __m256 acc = _mm256_setzero_ps();
  4306. uint32_t ud, um;
  4307. const uint8_t * restrict db = (const uint8_t *)&ud;
  4308. const uint8_t * restrict mb = (const uint8_t *)&um;
  4309. float summs = 0;
  4310. // TODO: optimize this
  4311. for (int i = 0; i < nb; ++i) {
  4312. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4313. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4314. const uint8_t * restrict q2 = x[i].qs;
  4315. const int8_t * restrict q8 = y[i].qs;
  4316. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4317. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  4318. um = (sc[0] >> 4) & 0x0f0f0f0f;
  4319. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  4320. summs += dmin * smin;
  4321. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  4322. const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
  4323. const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
  4324. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4325. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  4326. const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4327. const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4328. const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
  4329. const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
  4330. const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
  4331. const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
  4332. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
  4333. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
  4334. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
  4335. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
  4336. }
  4337. *s = hsum_float_8(acc) + summs;
  4338. #elif defined __AVX__
  4339. const __m128i m3 = _mm_set1_epi8(3);
  4340. __m256 acc = _mm256_setzero_ps();
  4341. uint32_t ud, um;
  4342. const uint8_t * restrict db = (const uint8_t *)&ud;
  4343. const uint8_t * restrict mb = (const uint8_t *)&um;
  4344. float summs = 0;
  4345. // TODO: optimize this
  4346. for (int i = 0; i < nb; ++i) {
  4347. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4348. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4349. const uint8_t * restrict q2 = x[i].qs;
  4350. const int8_t * restrict q8 = y[i].qs;
  4351. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4352. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  4353. um = (sc[0] >> 4) & 0x0f0f0f0f;
  4354. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  4355. summs += dmin * smin;
  4356. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  4357. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4358. const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4359. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4360. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4361. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4362. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  4363. const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
  4364. const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
  4365. const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
  4366. const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
  4367. const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
  4368. const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
  4369. const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
  4370. const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
  4371. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
  4372. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
  4373. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
  4374. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
  4375. }
  4376. *s = hsum_float_8(acc) + summs;
  4377. #elif defined __riscv_v_intrinsic
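// RVV path for the 64-quant block: the four 2-bit planes (16 values each) are multiplied with q8 using
// widening multiplies and reduced to scalars, each weighted by its low-nibble scale; the min correction
// uses the high nibbles of the same scale bytes against y[i].bsums, as in the NEON path above.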
  4378. uint32_t aux32[2];
  4379. const uint8_t * scales = (const uint8_t *)aux32;
  4380. float sumf = 0;
  4381. for (int i = 0; i < nb; ++i) {
  4382. const float d = y[i].d * (float)x[i].d;
  4383. const float dmin = -y[i].d * (float)x[i].dmin;
  4384. const uint8_t * restrict q2 = x[i].qs;
  4385. const int8_t * restrict q8 = y[i].qs;
  4386. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4387. aux32[0] = sc[0] & 0x0f0f0f0f;
  4388. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  4389. sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  4390. int isum1 = 0;
  4391. int isum2 = 0;
  4392. size_t vl = 16;
  4393. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  4394. // load Q2
  4395. vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
  4396. vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
  4397. vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
  4398. vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
  4399. vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
  4400. // load Q8, and take product with Q2
  4401. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  4402. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  4403. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  4404. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  4405. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
  4406. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
  4407. vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
  4408. vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
  4409. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
  4410. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
  4411. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
  4412. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
  4413. sumf += d * (isum1 + isum2);
  4414. }
  4415. *s = sumf;
  4416. #else
  4417. float sumf = 0;
  4418. int isum[4];
  4419. for (int i = 0; i < nb; ++i) {
  4420. const uint8_t * q2 = x[i].qs;
  4421. const int8_t * q8 = y[i].qs;
  4422. const uint8_t * sc = x[i].scales;
  4423. int summs = 0;
  4424. for (int j = 0; j < QK_K/16; ++j) {
  4425. summs += y[i].bsums[j] * (sc[j] >> 4);
  4426. }
  4427. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4428. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4429. isum[0] = isum[1] = isum[2] = isum[3] = 0;
  4430. for (int l = 0; l < 16; ++l) {
  4431. isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
  4432. isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
  4433. isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
  4434. isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
  4435. }
  4436. for (int l = 0; l < 4; ++l) {
  4437. isum[l] *= (sc[l] & 0xF);
  4438. }
  4439. sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
  4440. }
  4441. *s = sumf;
  4442. #endif
  4443. }
  4444. #endif
  4445. #if QK_K == 256
  4446. void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  4447. assert(n % QK_K == 0);
  4448. const uint32_t kmask1 = 0x03030303;
  4449. const uint32_t kmask2 = 0x0f0f0f0f;
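// x[i].scales packs 16 signed 6-bit sub-block scales into 12 bytes: the low 4 bits of each scale sit in
// the first 8 bytes and the high 2 bits in the last 4 bytes. kmask2 extracts the nibble part and kmask1
// the 2-bit part when each path below rebuilds the scales (which are stored with a +32 offset).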
  4450. const block_q3_K * restrict x = vx;
  4451. const block_q8_K * restrict y = vy;
  4452. const int nb = n / QK_K;
  4453. #ifdef __ARM_NEON
  4454. uint32_t aux[3];
  4455. uint32_t utmp[4];
  4456. const uint8x16_t m3b = vdupq_n_u8(0x3);
  4457. const int32x4_t vzero = vdupq_n_s32(0);
  4458. const uint8x16_t m0 = vdupq_n_u8(1);
  4459. const uint8x16_t m1 = vshlq_n_u8(m0, 1);
  4460. const uint8x16_t m2 = vshlq_n_u8(m0, 2);
  4461. const uint8x16_t m3 = vshlq_n_u8(m0, 3);
  4462. const int8_t m32 = 32;
  4463. ggml_int8x16x4_t q3bytes;
  4464. float sum = 0;
  4465. for (int i = 0; i < nb; ++i) {
  4466. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4467. const uint8_t * restrict q3 = x[i].qs;
  4468. const uint8_t * restrict qh = x[i].hmask;
  4469. const int8_t * restrict q8 = y[i].qs;
  4470. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  4471. ggml_uint8x16x4_t q3h;
  4472. int32_t isum = 0;
  4473. // Set up scales
  4474. memcpy(aux, x[i].scales, 12);
  4475. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  4476. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  4477. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  4478. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  4479. int8_t * scale = (int8_t *)utmp;
  4480. for (int j = 0; j < 16; ++j) scale[j] -= m32;
  4481. for (int j = 0; j < QK_K/128; ++j) {
  4482. const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
  4483. const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
  4484. const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
  4485. q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
  4486. q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
  4487. q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
  4488. q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
  4489. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  4490. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  4491. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  4492. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  4493. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
  4494. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
  4495. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
  4496. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
  4497. scale += 4;
  4498. q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
  4499. q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
  4500. q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
  4501. q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
  4502. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  4503. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  4504. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  4505. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  4506. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
  4507. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
  4508. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
  4509. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
  4510. scale += 4;
  4511. if (j == 0) {
  4512. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
  4513. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
  4514. }
  4515. }
  4516. sum += d * isum;
  4517. }
  4518. *s = sum;
  4519. #elif defined __AVX2__
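// AVX2 path: the low 2-bit planes come from q3bits, and for each plane the matching hmask bit is turned
// into a value of 4 wherever the high bit is clear (andnot against a moving one-bit mask, shifted into
// place), so the signed quant is formed as a difference of two maddubs products before the scale multiply.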
  4520. const __m256i m3 = _mm256_set1_epi8(3);
  4521. const __m256i mone = _mm256_set1_epi8(1);
  4522. const __m128i m32 = _mm_set1_epi8(32);
  4523. __m256 acc = _mm256_setzero_ps();
  4524. uint32_t aux[3];
  4525. for (int i = 0; i < nb; ++i) {
  4526. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4527. const uint8_t * restrict q3 = x[i].qs;
  4528. const int8_t * restrict q8 = y[i].qs;
  4529. // Set up scales
  4530. memcpy(aux, x[i].scales, 12);
  4531. __m128i scales128 = _mm_set_epi32(
  4532. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  4533. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  4534. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  4535. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  4536. scales128 = _mm_sub_epi8(scales128, m32);
  4537. const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
  4538. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4539. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4540. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
  4541. // high bit
  4542. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
  4543. // integer accumulator
  4544. __m256i sumi = _mm256_setzero_si256();
  4545. int bit = 0;
  4546. int is = 0;
  4547. for (int j = 0; j < QK_K/128; ++j) {
  4548. // load low 2 bits
  4549. const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
  4550. // prepare low and high bits
  4551. const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
  4552. const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4553. ++bit;
  4554. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
  4555. const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4556. ++bit;
  4557. const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
  4558. const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4559. ++bit;
  4560. const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
  4561. const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4562. ++bit;
  4563. // load Q8 quants
  4564. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4565. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4566. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4567. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
4568. // Dot product: the low 2-bit part and the high-bit part are multiplied with q8 separately, so the unsigned
4569. // _mm256_maddubs_epi16 can be used, and the two results are subtracted. Each q3h_* lane is 4 where the high
4570. // bit is clear and 0 where it is set, so the difference yields q8 times the signed q3 value in [-4, 3].
  4571. __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  4572. __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  4573. __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
  4574. __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
  4575. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  4576. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  4577. __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
  4578. __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
  4579. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  4580. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  4581. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  4582. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
  4583. // multiply with scales
  4584. p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
  4585. p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
  4586. p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
  4587. p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
  4588. // accumulate
  4589. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  4590. p16_2 = _mm256_add_epi32(p16_2, p16_3);
  4591. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
  4592. }
  4593. // multiply with block scale and accumulate
  4594. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4595. }
  4596. *s = hsum_float_8(acc);
  4597. #elif defined __AVX__
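// 128-bit SSE counterpart of the AVX2 block above: the same low-bit/high-bit construction and scale
// shuffling, split across pairs of __m128i registers and accumulated into sumi_0 / sumi_1.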
  4598. const __m128i m3 = _mm_set1_epi8(3);
  4599. const __m128i mone = _mm_set1_epi8(1);
  4600. const __m128i m32 = _mm_set1_epi8(32);
  4601. const __m128i m2 = _mm_set1_epi8(2);
  4602. __m256 acc = _mm256_setzero_ps();
  4603. const uint32_t *aux;
  4604. for (int i = 0; i < nb; ++i) {
  4605. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4606. const uint8_t * restrict q3 = x[i].qs;
  4607. const int8_t * restrict q8 = y[i].qs;
  4608. // Set up scales
  4609. aux = (const uint32_t *)x[i].scales;
  4610. __m128i scales128 = _mm_set_epi32(
  4611. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  4612. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  4613. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  4614. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  4615. scales128 = _mm_sub_epi8(scales128, m32);
  4616. const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
  4617. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
  4618. const __m128i scales[2] = { scales_0, scales_1 };
  4619. // high bit *128*2 from block_q3_K.hmask[QK_K/8]
  4620. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
  4621. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
  4622. // integer accumulator
  4623. __m128i sumi_0 = _mm_setzero_si128();
  4624. __m128i sumi_1 = _mm_setzero_si128();
  4625. for (int j = 0; j < QK_K/128; ++j) {
  4626. // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
  4627. const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  4628. const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  4629. // prepare low and high bits
  4630. const int bit = j << 2;
  4631. const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
  4632. const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
  4633. const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
  4634. const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
  4635. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
  4636. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
  4637. const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  4638. const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  4639. const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
  4640. const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
  4641. const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  4642. const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  4643. const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
  4644. const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
  4645. const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  4646. const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  4647. // load Q8 quants from block_q8_K.qs[QK_K]
  4648. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4649. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4650. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4651. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4652. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4653. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4654. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4655. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
4656. // Dot product: the low 2-bit part and the high-bit part are multiplied with q8 separately, so the unsigned
4657. // _mm_maddubs_epi16 can be used, and the two results are subtracted. Each q3h_* lane is 4 where the high
4658. // bit is clear and 0 where it is set, so the difference yields q8 times the signed q3 value in [-4, 3].
  4659. __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
  4660. __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
  4661. __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
  4662. __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
  4663. __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
  4664. __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
  4665. __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
  4666. __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
  4667. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
  4668. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
  4669. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
  4670. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
  4671. __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
  4672. __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
  4673. __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
  4674. __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
  4675. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  4676. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  4677. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  4678. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  4679. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  4680. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  4681. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  4682. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  4683. // multiply with scales
  4684. __m128i shuffle = _mm_set1_epi16(0x0100);
  4685. p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
  4686. shuffle = _mm_add_epi16(shuffle, m2);
  4687. p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
  4688. shuffle = _mm_add_epi16(shuffle, m2);
  4689. p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
  4690. shuffle = _mm_add_epi16(shuffle, m2);
  4691. p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
  4692. shuffle = _mm_add_epi16(shuffle, m2);
  4693. p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
  4694. shuffle = _mm_add_epi16(shuffle, m2);
  4695. p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
  4696. shuffle = _mm_add_epi16(shuffle, m2);
  4697. p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
  4698. shuffle = _mm_add_epi16(shuffle, m2);
  4699. p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
  4700. // accumulate
  4701. p16_0 = _mm_add_epi32(p16_0, p16_1);
  4702. p16_2 = _mm_add_epi32(p16_2, p16_3);
  4703. p16_4 = _mm_add_epi32(p16_4, p16_5);
  4704. p16_6 = _mm_add_epi32(p16_6, p16_7);
  4705. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  4706. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
  4707. }
  4708. // multiply with block scale and accumulate
  4709. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  4710. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  4711. }
  4712. *s = hsum_float_8(acc);
  4713. #elif defined __riscv_v_intrinsic
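// RVV path: for each 2-bit plane, comparing the relevant hmask bit with zero builds a lane mask, and a
// masked subtract-by-4 applies the scalar (hm & m ? 0 : 4) correction; the corrected planes are then
// widened, scaled per 16-lane half, and reduced into sum_t.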
  4714. uint32_t aux[3];
  4715. uint32_t utmp[4];
  4716. float sumf = 0;
  4717. for (int i = 0; i < nb; ++i) {
  4718. const uint8_t * restrict q3 = x[i].qs;
  4719. const uint8_t * restrict qh = x[i].hmask;
  4720. const int8_t * restrict q8 = y[i].qs;
  4721. memcpy(aux, x[i].scales, 12);
  4722. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  4723. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  4724. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  4725. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  4726. int8_t * scale = (int8_t *)utmp;
  4727. for (int j = 0; j < 16; ++j) scale[j] -= 32;
  4728. size_t vl = 32;
  4729. uint8_t m = 1;
  4730. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  4731. vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
  4732. int sum_t = 0;
  4733. for (int j = 0; j < QK_K; j += 128) {
  4734. vl = 32;
  4735. // load Q3
  4736. vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
  4737. vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
  4738. vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
  4739. vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
  4740. vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
  4741. // compute mask for subtraction
  4742. vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4743. vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
  4744. vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
  4745. m <<= 1;
  4746. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4747. vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
  4748. vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
  4749. m <<= 1;
  4750. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4751. vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
  4752. vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
  4753. m <<= 1;
  4754. vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4755. vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
  4756. vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
  4757. m <<= 1;
  4758. // load Q8 and take product with Q3
  4759. vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
  4760. vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  4761. vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  4762. vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  4763. vl = 16;
  4764. // retrieve lane to multiply with scale
  4765. vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
  4766. vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
  4767. vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
  4768. vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
  4769. vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
  4770. vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
  4771. vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
  4772. vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
  4773. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
  4774. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
  4775. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
  4776. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
  4777. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
  4778. q3 += 32; q8 += 128; scale += 8;
  4779. }
  4780. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  4781. sumf += d*sum_t;
  4782. }
  4783. *s = sumf;
  4784. #else
  4785. // scalar version
  4786. // This function is written like this so the compiler can manage to vectorize most of it
  4787. // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
  4788. // manually vectorized version above. Every other version I tried would run at least 4 times slower.
  4789. // The ideal situation would be if we could just write the code once, and the compiler would
  4790. // automatically produce the best possible set of machine instructions, instead of us having to manually
  4791. // write vectorized versions for AVX, ARM_NEON, etc.
  4792. int8_t aux8[QK_K];
  4793. int16_t aux16[8];
  4794. float sums [8];
  4795. int32_t aux32[8];
  4796. memset(sums, 0, 8*sizeof(float));
  4797. uint32_t auxs[4];
  4798. const int8_t * scales = (const int8_t*)auxs;
  4799. float sumf = 0;
  4800. for (int i = 0; i < nb; ++i) {
  4801. const uint8_t * restrict q3 = x[i].qs;
  4802. const uint8_t * restrict hm = x[i].hmask;
  4803. const int8_t * restrict q8 = y[i].qs;
  4804. memset(aux32, 0, 8*sizeof(int32_t));
  4805. int8_t * restrict a = aux8;
  4806. uint8_t m = 1;
  4807. for (int j = 0; j < QK_K; j += 128) {
  4808. for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
  4809. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4810. a += 32; m <<= 1;
  4811. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
  4812. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4813. a += 32; m <<= 1;
  4814. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
  4815. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4816. a += 32; m <<= 1;
  4817. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
  4818. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4819. a += 32; m <<= 1;
  4820. q3 += 32;
  4821. }
  4822. a = aux8;
  4823. memcpy(auxs, x[i].scales, 12);
  4824. uint32_t tmp = auxs[2];
  4825. auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  4826. auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  4827. auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  4828. auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  4829. for (int j = 0; j < QK_K/16; ++j) {
  4830. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  4831. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  4832. q8 += 8; a += 8;
  4833. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  4834. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  4835. q8 += 8; a += 8;
  4836. }
  4837. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  4838. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  4839. }
  4840. for (int l = 0; l < 8; ++l) sumf += sums[l];
  4841. *s = sumf;
  4842. #endif
  4843. }
  4844. #else
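// QK_K == 64 variant of the q3_K dot product: one 16-byte qs[] block, an 8-byte hmask and two scale
// bytes (four 4-bit scales stored with a +8 offset) per super-block. The NEON and RVV paths fold the
// constant -4 offset of the decoded values into isum through y[i].bsums, while the AVX paths and the
// scalar fallback subtract it per lane via the high-bit masks.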
  4845. void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  4846. assert(n % QK_K == 0);
  4847. const block_q3_K * restrict x = vx;
  4848. const block_q8_K * restrict y = vy;
  4849. const int nb = n / QK_K;
  4850. #ifdef __ARM_NEON
  4851. const int32x4_t vzero = vdupq_n_s32(0);
  4852. const uint8x16_t m3b = vdupq_n_u8(0x3);
  4853. const uint8x16_t mh = vdupq_n_u8(4);
  4854. ggml_int8x16x4_t q3bytes;
  4855. uint16_t aux16[2];
  4856. int8_t * scales = (int8_t *)aux16;
  4857. float sum = 0;
  4858. for (int i = 0; i < nb; ++i) {
  4859. ggml_uint8x16x4_t q3h;
  4860. const uint8x8_t hbits = vld1_u8(x[i].hmask);
  4861. const uint8x16_t q3bits = vld1q_u8(x[i].qs);
  4862. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);
  4863. const uint16_t a = *(const uint16_t *)x[i].scales;
  4864. aux16[0] = a & 0x0f0f;
  4865. aux16[1] = (a >> 4) & 0x0f0f;
  4866. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  4867. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  4868. const float d = y[i].d * (float)x[i].d;
  4869. const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
  4870. q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
  4871. q3h.val[1] = vandq_u8(mh, htmp);
  4872. q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
  4873. q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
  4874. q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
  4875. q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
  4876. q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
  4877. q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
  4878. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
  4879. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
  4880. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
  4881. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
  4882. sum += d * isum;
  4883. }
  4884. *s = sum;
  4885. #elif defined __AVX2__
  4886. const __m256i m3 = _mm256_set1_epi8(3);
  4887. const __m256i m1 = _mm256_set1_epi8(1);
  4888. __m256 acc = _mm256_setzero_ps();
  4889. uint64_t aux64;
  4890. uint16_t aux16[2];
  4891. const int8_t * aux8 = (const int8_t *)aux16;
  4892. for (int i = 0; i < nb; ++i) {
  4893. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4894. const uint8_t * restrict q3 = x[i].qs;
  4895. const int8_t * restrict q8 = y[i].qs;
  4896. const uint16_t a = *(const uint16_t *)x[i].scales;
  4897. aux16[0] = a & 0x0f0f;
  4898. aux16[1] = (a >> 4) & 0x0f0f;
  4899. const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
  4900. const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
  4901. memcpy(&aux64, x[i].hmask, 8);
  4902. const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  4903. __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
  4904. __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
  4905. q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
  4906. q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
  4907. // load low 2 bits
  4908. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  4909. // prepare low and high bits
  4910. const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
  4911. const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
  4912. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
  4913. // load Q8 quants
  4914. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4915. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
4916. // Dot product: the low 2-bit part and the high-bit part are multiplied with q8 separately, so the unsigned
4917. // _mm256_maddubs_epi16 can be used, and the two results are subtracted. Each q3h_* lane is 4 where the high
4918. // bit is clear and 0 where it is set, so the difference yields q8 times the signed q3 value in [-4, 3].
  4919. const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  4920. const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  4921. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  4922. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  4923. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  4924. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  4925. // multiply with scales
  4926. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  4927. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  4928. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  4929. // multiply with block scale and accumulate
  4930. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
  4931. }
  4932. *s = hsum_float_8(acc);
  4933. #elif defined __AVX__
  4934. const __m128i m3 = _mm_set1_epi8(3);
  4935. const __m128i m1 = _mm_set1_epi8(1);
  4936. __m256 acc = _mm256_setzero_ps();
  4937. uint64_t aux64;
  4938. uint16_t aux16[2];
  4939. const int8_t * aux8 = (const int8_t *)aux16;
  4940. for (int i = 0; i < nb; ++i) {
  4941. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4942. const uint8_t * restrict q3 = x[i].qs;
  4943. const int8_t * restrict q8 = y[i].qs;
  4944. const uint16_t a = *(const uint16_t *)x[i].scales;
  4945. aux16[0] = a & 0x0f0f;
  4946. aux16[1] = (a >> 4) & 0x0f0f;
  4947. const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
  4948. const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
  4949. const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
  4950. const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
  4951. memcpy(&aux64, x[i].hmask, 8);
  4952. __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  4953. __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
  4954. __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
  4955. __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
  4956. q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
  4957. q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
  4958. q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
  4959. q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
  4960. // load low 2 bits
  4961. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  4962. // prepare low and high bits
  4963. const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
  4964. const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
  4965. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
  4966. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
  4967. // load Q8 quants
  4968. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4969. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
4970. // Dot product: the low 2-bit part and the high-bit part are multiplied with q8 separately, so the unsigned
4971. // _mm_maddubs_epi16 can be used, and the two results are subtracted. Each q3h_* lane is 4 where the high
4972. // bit is clear and 0 where it is set, so the difference yields q8 times the signed q3 value in [-4, 3].
  4973. const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
  4974. const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
  4975. const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
  4976. const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
  4977. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
  4978. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
  4979. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
  4980. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
  4981. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  4982. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  4983. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  4984. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  4985. // multiply with scales
  4986. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  4987. p16_1 = _mm_madd_epi16(scale_1, p16_1);
  4988. p16_2 = _mm_madd_epi16(scale_2, p16_2);
  4989. p16_3 = _mm_madd_epi16(scale_3, p16_3);
  4990. p16_0 = _mm_add_epi32(p16_0, p16_2);
  4991. p16_1 = _mm_add_epi32(p16_1, p16_3);
  4992. __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
  4993. // multiply with block scale and accumulate
  4994. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
  4995. }
  4996. *s = hsum_float_8(acc);
  4997. #elif defined __riscv_v_intrinsic
  4998. uint16_t aux16[2];
  4999. int8_t * scales = (int8_t *)aux16;
  5000. float sumf = 0;
  5001. for (int i = 0; i < nb; ++i) {
  5002. const uint8_t * restrict q3 = x[i].qs;
  5003. const int8_t * restrict q8 = y[i].qs;
  5004. const uint16_t a = *(const uint16_t *)x[i].scales;
  5005. aux16[0] = a & 0x0f0f;
  5006. aux16[1] = (a >> 4) & 0x0f0f;
  5007. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  5008. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  5009. const float d = y[i].d * (float)x[i].d;
  5010. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5011. // load qh
  5012. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
  5013. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  5014. size_t vl = 16;
  5015. // extend and combine both qh_x1 and qh_x2
  5016. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
  5017. vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5018. vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
  5019. vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5020. vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
  5021. // load Q3
  5022. vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
  5023. vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
  5024. vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
  5025. vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
  5026. vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
  5027. vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
  5028. vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
  5029. vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
  5030. vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
  5031. // load Q8 and take product with Q3
  5032. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  5033. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  5034. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  5035. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  5036. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  5037. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  5038. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  5039. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  5040. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
  5041. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
  5042. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
  5043. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
  5044. sumf += d * isum;
  5045. }
  5046. *s = sumf;
  5047. #else
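// Scalar fallback: bit k of hm[l] supplies the high bit for quant l + 8*k, so each of the eight unrolled
// assignments below fills one 8-value group; the four signed 4-bit scales are then applied per 16-value
// sub-block in the QK_K/16 loop.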
  5048. int8_t aux8[QK_K];
  5049. int16_t aux16[8];
  5050. float sums [8];
  5051. int32_t aux32[8];
  5052. int32_t scales[4];
  5053. memset(sums, 0, 8*sizeof(float));
  5054. float sumf = 0;
  5055. for (int i = 0; i < nb; ++i) {
  5056. const uint8_t * restrict q3 = x[i].qs;
  5057. const uint8_t * restrict hm = x[i].hmask;
  5058. const int8_t * restrict q8 = y[i].qs;
  5059. int8_t * restrict a = aux8;
  5060. for (int l = 0; l < 8; ++l) {
  5061. a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
  5062. a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
  5063. a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
  5064. a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
  5065. a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
  5066. a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
  5067. a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
  5068. a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
  5069. }
  5070. scales[0] = (x[i].scales[0] & 0xF) - 8;
  5071. scales[1] = (x[i].scales[0] >> 4) - 8;
  5072. scales[2] = (x[i].scales[1] & 0xF) - 8;
  5073. scales[3] = (x[i].scales[1] >> 4) - 8;
  5074. memset(aux32, 0, 8*sizeof(int32_t));
  5075. for (int j = 0; j < QK_K/16; ++j) {
  5076. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5077. q8 += 8; a += 8;
  5078. for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
  5079. q8 += 8; a += 8;
  5080. for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
  5081. }
  5082. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5083. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5084. }
  5085. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5086. *s = sumf;
  5087. #endif
  5088. }
  5089. #endif
  5090. #if QK_K == 256
  5091. void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  5092. assert(n % QK_K == 0);
  5093. const block_q4_K * restrict x = vx;
  5094. const block_q8_K * restrict y = vy;
  5095. const int nb = n / QK_K;
  5096. static const uint32_t kmask1 = 0x3f3f3f3f;
  5097. static const uint32_t kmask2 = 0x0f0f0f0f;
  5098. static const uint32_t kmask3 = 0x03030303;
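// For q4_K the 12 scale bytes pack 8 six-bit sub-block scales and 8 six-bit mins. kmask1 keeps the low
// 6 bits of each byte, while kmask2 and kmask3 pick out the nibble and 2-bit fragments that carry the
// upper bits; each path below uses them to rebuild the scales and mins in utmp.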
  5099. uint32_t utmp[4];
  5100. #ifdef __ARM_NEON
  5101. const uint8x16_t m4b = vdupq_n_u8(0xf);
  5102. const int32x4_t mzero = vdupq_n_s32(0);
  5103. ggml_int8x16x2_t q4bytes;
  5104. ggml_int8x16x2_t q8bytes;
  5105. float sumf = 0;
  5106. for (int i = 0; i < nb; ++i) {
  5107. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5108. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5109. const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
  5110. memcpy(utmp, x[i].scales, 12);
  5111. uint32x2_t mins8 = { 0 };
  5112. mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
  5113. mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
  5114. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5115. utmp[0] &= kmask1;
  5116. const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
  5117. const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
  5118. vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
  5119. sumf -= dmin * vaddvq_s32(prod);
  5120. const uint8_t * scales = (const uint8_t *)utmp;
  5121. const uint8_t * restrict q4 = x[i].qs;
  5122. const int8_t * restrict q8 = y[i].qs;
  5123. int32_t sumi1 = 0;
  5124. int32_t sumi2 = 0;
  5125. for (int j = 0; j < QK_K/64; ++j) {
  5126. const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
  5127. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  5128. q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
  5129. q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
  5130. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  5131. sumi1 += vaddvq_s32(p1) * scales[2*j+0];
  5132. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  5133. q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
  5134. q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
  5135. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  5136. sumi2 += vaddvq_s32(p2) * scales[2*j+1];
  5137. }
  5138. sumf += d * (sumi1 + sumi2);
  5139. }
  5140. *s = sumf;
  5141. #elif defined __AVX2__
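// AVX2 path: the 8 mins (upper half of mins_and_scales) are dotted with the pairwise-summed bsums and
// folded into acc_m with dmin (defined with a negative sign), while the main loop runs maddubs on the
// 4-bit nibbles and a per-sub-block 16-bit scale shuffle before the float multiply-add with d.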
  5142. const __m256i m4 = _mm256_set1_epi8(0xF);
  5143. __m256 acc = _mm256_setzero_ps();
  5144. __m128 acc_m = _mm_setzero_ps();
  5145. for (int i = 0; i < nb; ++i) {
  5146. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5147. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5148. memcpy(utmp, x[i].scales, 12);
  5149. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5150. const uint32_t uaux = utmp[1] & kmask1;
  5151. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5152. utmp[2] = uaux;
  5153. utmp[0] &= kmask1;
  5154. const uint8_t * restrict q4 = x[i].qs;
  5155. const int8_t * restrict q8 = y[i].qs;
  5156. const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
  5157. const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
  5158. const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
  5159. const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
  5160. acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
  5161. const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
  5162. const __m256i scales = MM256_SET_M128I(sc128, sc128);
  5163. __m256i sumi = _mm256_setzero_si256();
  5164. for (int j = 0; j < QK_K/64; ++j) {
  5165. const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
  5166. const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
  5167. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  5168. const __m256i q4l = _mm256_and_si256(q4bits, m4);
  5169. const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
  5170. const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5171. __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
  5172. p16l = _mm256_madd_epi16(scale_l, p16l);
  5173. const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5174. __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
  5175. p16h = _mm256_madd_epi16(scale_h, p16h);
  5176. const __m256i sumj = _mm256_add_epi32(p16l, p16h);
  5177. sumi = _mm256_add_epi32(sumi, sumj);
  5178. }
  5179. __m256 vd = _mm256_set1_ps(d);
  5180. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
  5181. }
  5182. acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
  5183. acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
  5184. *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
  5185. #elif defined __AVX__
  5186. const __m128i m4 = _mm_set1_epi8(0xF);
  5187. const __m128i m2 = _mm_set1_epi8(0x2);
  5188. __m256 acc = _mm256_setzero_ps();
  5189. __m128 acc_m = _mm_setzero_ps();
  5190. for (int i = 0; i < nb; ++i) {
  5191. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5192. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5193. const uint8_t * restrict q4 = x[i].qs;
  5194. const int8_t * restrict q8 = y[i].qs;
  5195. memcpy(utmp, x[i].scales, 12);
  5196. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5197. const uint32_t uaux = utmp[1] & kmask1;
  5198. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5199. utmp[2] = uaux;
  5200. utmp[0] &= kmask1;
  5201. const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
  5202. const __m128i scales = _mm_cvtepu8_epi16(utmps);
  5203. const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
  5204. const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
  5205. const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
  5206. const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
  5207. const __m128i prod = _mm_madd_epi16(mins, q8s);
  5208. acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
  5209. __m128i sumi_0 = _mm_setzero_si128();
  5210. __m128i sumi_1 = _mm_setzero_si128();
  5211. __m128i shuffle = _mm_set1_epi16(0x0100);
  5212. for (int j = 0; j < QK_K/64; ++j) {
  5213. const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
  5214. shuffle = _mm_add_epi16(shuffle, m2);
  5215. const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
  5216. shuffle = _mm_add_epi16(shuffle, m2);
  5217. __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  5218. const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
  5219. const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
  5220. q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  5221. const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
  5222. const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
  5223. const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5224. __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
  5225. p16l = _mm_madd_epi16(scale_l, p16l);
  5226. sumi_0 = _mm_add_epi32(sumi_0, p16l);
  5227. const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5228. p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
  5229. p16l = _mm_madd_epi16(scale_l, p16l);
  5230. sumi_1 = _mm_add_epi32(sumi_1, p16l);
  5231. const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5232. __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
  5233. p16h = _mm_madd_epi16(scale_h, p16h);
  5234. sumi_0 = _mm_add_epi32(sumi_0, p16h);
  5235. const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5236. p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
  5237. p16h = _mm_madd_epi16(scale_h, p16h);
  5238. sumi_1 = _mm_add_epi32(sumi_1, p16h);
  5239. }
  5240. __m256 vd = _mm256_set1_ps(d);
  5241. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  5242. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
  5243. }
  5244. acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
  5245. acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
  5246. *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
  5247. #elif defined __riscv_v_intrinsic
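// RISC-V vector path: the two strided loads below gather the even/odd bsums so the mins contribution
// can be subtracted from sumf up front; each 32-byte nibble group is then widened, multiplied against
// q8 and reduced with vredsum before the per-group scale is applied.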
  5248. const uint8_t * scales = (const uint8_t*)&utmp[0];
  5249. const uint8_t * mins = (const uint8_t*)&utmp[2];
  5250. float sumf = 0;
  5251. for (int i = 0; i < nb; ++i) {
  5252. size_t vl = 8;
  5253. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5254. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5255. vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
  5256. vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
  5257. vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
  5258. memcpy(utmp, x[i].scales, 12);
  5259. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5260. const uint32_t uaux = utmp[1] & kmask1;
  5261. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5262. utmp[2] = uaux;
  5263. utmp[0] &= kmask1;
  5264. vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
  5265. vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
  5266. vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
  5267. vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  5268. sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
  5269. const uint8_t * restrict q4 = x[i].qs;
  5270. const int8_t * restrict q8 = y[i].qs;
  5271. vl = 32;
  5272. int32_t sum_1 = 0;
  5273. int32_t sum_2 = 0;
  5274. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  5275. for (int j = 0; j < QK_K/64; ++j) {
  5276. // load Q4
  5277. vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
  5278. // load Q8 and multiply it with lower Q4 nibble
  5279. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  5280. vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
  5281. vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
  5282. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
  5283. sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
  5284. // load Q8 and multiply it with upper Q4 nibble
  5285. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  5286. vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
  5287. vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
  5288. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
  5289. sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
  5290. q4 += 32; q8 += 64;
  5291. }
  5292. sumf += d*(sum_1 + sum_2);
  5293. }
  5294. *s = sumf;
  5295. #else
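// Scalar reference path: expand all 4-bit quants into aux8, unpack scales/mins the same way,
// accumulate 8 partial sums per sub-block in aux32 and apply d/dmin once per super-block.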
  5296. const uint8_t * scales = (const uint8_t*)&utmp[0];
  5297. const uint8_t * mins = (const uint8_t*)&utmp[2];
  5298. int8_t aux8[QK_K];
  5299. int16_t aux16[8];
  5300. float sums [8];
  5301. int32_t aux32[8];
  5302. memset(sums, 0, 8*sizeof(float));
  5303. float sumf = 0;
  5304. for (int i = 0; i < nb; ++i) {
  5305. const uint8_t * restrict q4 = x[i].qs;
  5306. const int8_t * restrict q8 = y[i].qs;
  5307. memset(aux32, 0, 8*sizeof(int32_t));
  5308. int8_t * restrict a = aux8;
  5309. for (int j = 0; j < QK_K/64; ++j) {
  5310. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
  5311. a += 32;
  5312. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
  5313. a += 32; q4 += 32;
  5314. }
  5315. memcpy(utmp, x[i].scales, 12);
  5316. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5317. const uint32_t uaux = utmp[1] & kmask1;
  5318. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5319. utmp[2] = uaux;
  5320. utmp[0] &= kmask1;
  5321. int sumi = 0;
  5322. for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
  5323. a = aux8;
  5324. int is = 0;
  5325. for (int j = 0; j < QK_K/32; ++j) {
  5326. int32_t scale = scales[is++];
  5327. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5328. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5329. q8 += 8; a += 8;
  5330. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5331. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5332. q8 += 8; a += 8;
  5333. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5334. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5335. q8 += 8; a += 8;
  5336. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5337. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5338. q8 += 8; a += 8;
  5339. }
  5340. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5341. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5342. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  5343. sumf -= dmin * sumi;
  5344. }
  5345. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5346. *s = sumf;
  5347. #endif
  5348. }
  5349. #else
  5350. void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  5351. assert(n % QK_K == 0);
  5352. const block_q4_K * restrict x = vx;
  5353. const block_q8_K * restrict y = vy;
  5354. const int nb = n / QK_K;
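// QK_K == 64 variant: x[i].d[0] scales the quant dot product and x[i].d[1] scales the mins correction
// built from y[i].bsums; the two scale bytes pack four 4-bit values (low nibbles are the scales used
// below, high nibbles the mins), so a whole block is handled in one pass per branch.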
  5355. #ifdef __ARM_NEON
  5356. const uint8x16_t m4b = vdupq_n_u8(0xf);
  5357. const int32x4_t mzero = vdupq_n_s32(0);
  5358. float sumf = 0;
  5359. ggml_int8x16x2_t q4bytes;
  5360. ggml_int8x16x4_t q8bytes;
  5361. float sum_mins = 0.f;
  5362. uint16_t aux16[2];
  5363. const uint8_t * restrict scales = (const uint8_t *)aux16;
  5364. for (int i = 0; i < nb; ++i) {
  5365. const uint8_t * restrict q4 = x[i].qs;
  5366. const int8_t * restrict q8 = y[i].qs;
  5367. const uint16_t * restrict a = (const uint16_t *)x[i].scales;
  5368. aux16[0] = a[0] & 0x0f0f;
  5369. aux16[1] = (a[0] >> 4) & 0x0f0f;
  5370. const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
  5371. sum_mins += y[i].d * (float)x[i].d[1] * summi;
  5372. const float d = y[i].d * (float)x[i].d[0];
  5373. const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);
  5374. q8bytes = ggml_vld1q_s8_x4(q8);
  5375. q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
  5376. q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
  5377. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  5378. const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
  5379. q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
  5380. q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
  5381. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
  5382. const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
  5383. sumf += d * (sumi1 + sumi2);
  5384. }
  5385. *s = sumf - sum_mins;
  5386. #elif defined __AVX2__
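// AVX2: a single 32-byte load covers the whole block; the low-nibble product is weighted by scales[0],
// the high-nibble product by scales[1], and the mins term (summs) is subtracted once at the end.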
  5387. const __m256i m4 = _mm256_set1_epi8(0xF);
  5388. __m256 acc = _mm256_setzero_ps();
  5389. float summs = 0;
  5390. uint16_t aux16[2];
  5391. const uint8_t * scales = (const uint8_t *)aux16;
  5392. for (int i = 0; i < nb; ++i) {
  5393. const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
  5394. const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
  5395. const __m256 vd = _mm256_set1_ps(d);
  5396. const uint16_t * a = (const uint16_t *)x[i].scales;
  5397. aux16[0] = a[0] & 0x0f0f;
  5398. aux16[1] = (a[0] >> 4) & 0x0f0f;
  5399. summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  5400. const uint8_t * restrict q4 = x[i].qs;
  5401. const int8_t * restrict q8 = y[i].qs;
  5402. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
  5403. const __m256i q4l = _mm256_and_si256(q4bits, m4);
  5404. const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
  5405. const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5406. const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
  5407. const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
  5408. const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
  5409. const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
  5410. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
  5411. const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
  5412. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
  5413. }
  5414. *s = hsum_float_8(acc) - summs;
  5415. #elif defined __AVX__
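// AVX: identical arithmetic, carried out on the 128-bit halves of the same 256-bit loads.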
  5416. const __m128i m4 = _mm_set1_epi8(0xF);
  5417. __m256 acc = _mm256_setzero_ps();
  5418. float summs = 0;
  5419. uint16_t aux16[2];
  5420. const uint8_t * scales = (const uint8_t *)aux16;
  5421. for (int i = 0; i < nb; ++i) {
  5422. const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
  5423. const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
  5424. const __m256 vd = _mm256_set1_ps(d);
  5425. const uint16_t * a = (const uint16_t *)x[i].scales;
  5426. aux16[0] = a[0] & 0x0f0f;
  5427. aux16[1] = (a[0] >> 4) & 0x0f0f;
  5428. summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  5429. const uint8_t * restrict q4 = x[i].qs;
  5430. const int8_t * restrict q8 = y[i].qs;
  5431. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
  5432. const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
  5433. const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
  5434. const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
  5435. const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
  5436. const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
  5437. const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
  5438. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5439. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  5440. const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
  5441. const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
  5442. const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
  5443. const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
  5444. const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
  5445. const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
  5446. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
  5447. const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
  5448. const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
  5449. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
  5450. }
  5451. *s = hsum_float_8(acc) - summs;
  5452. #elif defined __riscv_v_intrinsic
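// RISC-V vector: the mins correction is subtracted from sumf first, then one widening multiply plus
// reduction per nibble half produces the two scaled partial sums.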
  5453. uint16_t s16[2];
  5454. const uint8_t * restrict scales = (const uint8_t *)s16;
  5455. float sumf = 0;
  5456. for (int i = 0; i < nb; ++i) {
  5457. const uint8_t * restrict q4 = x[i].qs;
  5458. const int8_t * restrict q8 = y[i].qs;
  5459. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  5460. s16[0] = b[0] & 0x0f0f;
  5461. s16[1] = (b[0] >> 4) & 0x0f0f;
  5462. sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  5463. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  5464. size_t vl = 32;
  5465. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  5466. // load Q4
  5467. vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
  5468. // load Q8 and multiply it with lower Q4 nibble
  5469. vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
  5470. vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
  5471. vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);
  5472. sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);
  5473. // load Q8 and multiply it with upper Q4 nibble
  5474. vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
  5475. vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  5476. vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);
  5477. sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
  5478. }
  5479. *s = sumf;
  5480. #else
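// Scalar reference for the QK_K == 64 case: expand the 64 nibbles into aux8 and accumulate 16-wide
// partial sums per scale group.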
  5481. uint8_t aux8[QK_K];
  5482. int16_t aux16[16];
  5483. float sums [8];
  5484. memset(sums, 0, 8*sizeof(float));
  5485. uint16_t s16[2];
  5486. const uint8_t * restrict scales = (const uint8_t *)s16;
  5487. float sumf = 0;
  5488. for (int i = 0; i < nb; ++i) {
  5489. const uint8_t * restrict q4 = x[i].qs;
  5490. const int8_t * restrict q8 = y[i].qs;
  5491. uint8_t * restrict a = aux8;
  5492. for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
  5493. for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
  5494. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  5495. s16[0] = b[0] & 0x0f0f;
  5496. s16[1] = (b[0] >> 4) & 0x0f0f;
  5497. sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  5498. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  5499. for (int j = 0; j < QK_K/32; ++j) {
  5500. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  5501. q8 += 16; a += 16;
  5502. for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
  5503. q8 += 16; a += 16;
  5504. const float dl = d * scales[j];
  5505. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
  5506. }
  5507. }
  5508. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5509. *s = sumf;
  5510. #endif
  5511. }
  5512. #endif
  5513. #if QK_K == 256
  5514. void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  5515. assert(n % QK_K == 0);
  5516. const block_q5_K * restrict x = vx;
  5517. const block_q8_K * restrict y = vy;
  5518. const int nb = n / QK_K;
  5519. static const uint32_t kmask1 = 0x3f3f3f3f;
  5520. static const uint32_t kmask2 = 0x0f0f0f0f;
  5521. static const uint32_t kmask3 = 0x03030303;
  5522. uint32_t utmp[4];
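// x[i].scales packs eight 6-bit scales and eight 6-bit mins into 12 bytes. The memcpy + kmask shuffle
// repeated in every branch below rearranges them so that utmp[0..1] hold the eight scale bytes and
// utmp[2..3] the eight min bytes (the same unpacking get_scale_min_k4() does elsewhere in this file).
// An equivalent plain-C sketch, for illustration only:
//
//   uint8_t sc[8], mn[8];
//   for (int k = 0; k < 8; ++k) {
//       if (k < 4) {
//           sc[k] = x[i].scales[k]     & 63;
//           mn[k] = x[i].scales[k + 4] & 63;
//       } else {
//           sc[k] = (x[i].scales[k + 4] & 15) | ((x[i].scales[k - 4] >> 6) << 4);
//           mn[k] = (x[i].scales[k + 4] >>  4) | ((x[i].scales[k    ] >> 6) << 4);
//       }
//   }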
  5523. #ifdef __ARM_NEON
  5524. const uint8x16_t m4b = vdupq_n_u8(0xf);
  5525. const uint8x16_t mone = vdupq_n_u8(1);
  5526. const uint8x16_t mtwo = vdupq_n_u8(2);
  5527. const int32x4_t mzero = vdupq_n_s32(0);
  5528. ggml_int8x16x4_t q5bytes;
  5529. float sumf = 0;
  5530. for (int i = 0; i < nb; ++i) {
  5531. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5532. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5533. const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
  5534. memcpy(utmp, x[i].scales, 12);
  5535. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5536. const uint32_t uaux = utmp[1] & kmask1;
  5537. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5538. utmp[2] = uaux;
  5539. utmp[0] &= kmask1;
  5540. const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
  5541. const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
  5542. const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
  5543. vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
  5544. int32_t sumi_mins = vaddvq_s32(prod);
  5545. const uint8_t * scales = (const uint8_t *)utmp;
  5546. const uint8_t * restrict q5 = x[i].qs;
  5547. const uint8_t * restrict qh = x[i].qh;
  5548. const int8_t * restrict q8 = y[i].qs;
  5549. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  5550. ggml_uint8x16x4_t q5h;
  5551. int32_t sumi = 0;
  5552. for (int j = 0; j < QK_K/64; ++j) {
  5553. const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
  5554. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  5555. q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
  5556. q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
  5557. q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
  5558. q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
  5559. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
  5560. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
  5561. q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
  5562. q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
  5563. q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
  5564. q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
  5565. sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
  5566. sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
  5567. }
  5568. sumf += d * sumi - dmin * sumi_mins;
  5569. }
  5570. *s = sumf;
  5571. #elif defined __AVX2__
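// AVX2 path for q5_K: scales/mins are handled as in q4_K; the extra 5th bit comes from the 32-byte qh
// field. hmask selects one bit plane per 32-quant group and is shifted left after each use (bit tracks
// which plane), so q5h_* reinserts the high bit as +16 before the maddubs/madd accumulation.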
  5572. const __m256i m4 = _mm256_set1_epi8(0xF);
  5573. const __m128i mzero = _mm_setzero_si128();
  5574. const __m256i mone = _mm256_set1_epi8(1);
  5575. __m256 acc = _mm256_setzero_ps();
  5576. float summs = 0.f;
  5577. for (int i = 0; i < nb; ++i) {
  5578. const uint8_t * restrict q5 = x[i].qs;
  5579. const int8_t * restrict q8 = y[i].qs;
  5580. #if QK_K == 256
  5581. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5582. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5583. memcpy(utmp, x[i].scales, 12);
  5584. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5585. const uint32_t uaux = utmp[1] & kmask1;
  5586. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5587. utmp[2] = uaux;
  5588. utmp[0] &= kmask1;
  5589. #else
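// note: this branch is unreachable in this build; the surrounding #if QK_K == 256 already guarantees
// the block above is taken.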
  5590. // TODO
  5591. const float d = 0, dmin = 0;
  5592. #endif
  5593. const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
  5594. const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
  5595. const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
  5596. const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
  5597. const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
  5598. summs += dmin * _mm_extract_epi32(hsum, 0);
  5599. const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
  5600. const __m256i scales = MM256_SET_M128I(sc128, sc128);
  5601. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
  5602. __m256i hmask = mone;
  5603. __m256i sumi = _mm256_setzero_si256();
  5604. int bit = 0;
  5605. for (int j = 0; j < QK_K/64; ++j) {
  5606. const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
  5607. const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
  5608. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
  5609. const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
  5610. const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
  5611. const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
  5612. hmask = _mm256_slli_epi16(hmask, 1);
  5613. const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
  5614. const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
  5615. const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
  5616. hmask = _mm256_slli_epi16(hmask, 1);
  5617. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5618. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5619. __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
  5620. __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
  5621. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  5622. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  5623. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  5624. }
  5625. __m256 vd = _mm256_set1_ps(d);
  5626. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
  5627. }
  5628. *s = hsum_float_8(acc) + summs;
  5629. #elif defined __AVX__
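// AVX: same as the AVX2 branch but on 128-bit halves; hbits_0/hbits_1 supply the high bits for the two
// halves and the running shuffle index replaces get_scale_shuffle_k4.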
  5630. const __m128i m4 = _mm_set1_epi8(0xF);
  5631. const __m128i mzero = _mm_setzero_si128();
  5632. const __m128i mone = _mm_set1_epi8(1);
  5633. const __m128i m2 = _mm_set1_epi8(2);
  5634. __m256 acc = _mm256_setzero_ps();
  5635. float summs = 0.f;
  5636. for (int i = 0; i < nb; ++i) {
  5637. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5638. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5639. const uint8_t * restrict q5 = x[i].qs;
  5640. const int8_t * restrict q8 = y[i].qs;
  5641. memcpy(utmp, x[i].scales, 12);
  5642. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5643. const uint32_t uaux = utmp[1] & kmask1;
  5644. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5645. utmp[2] = uaux;
  5646. utmp[0] &= kmask1;
  5647. const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
  5648. const __m128i scales = _mm_cvtepu8_epi16(utmps);
  5649. const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
  5650. const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
  5651. const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
  5652. const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
  5653. const __m128i prod = _mm_madd_epi16(mins, q8s);
  5654. const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
  5655. summs += dmin * _mm_extract_epi32(hsum, 0);
  5656. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
  5657. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
  5658. __m128i hmask = mone;
  5659. __m128i sumi_0 = _mm_setzero_si128();
  5660. __m128i sumi_1 = _mm_setzero_si128();
  5661. int bit = 0;
  5662. __m128i shuffle = _mm_set1_epi16(0x0100);
  5663. for (int j = 0; j < QK_K/64; ++j) {
  5664. const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
  5665. shuffle = _mm_add_epi16(shuffle, m2);
  5666. const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
  5667. shuffle = _mm_add_epi16(shuffle, m2);
  5668. const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
  5669. const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
  5670. __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
  5671. __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
  5672. __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
  5673. __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
  5674. __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
  5675. __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
  5676. hmask = _mm_slli_epi16(hmask, 1);
  5677. __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5678. __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5679. __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
  5680. __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
  5681. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  5682. p16_1 = _mm_madd_epi16(scale_0, p16_1);
  5683. q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
  5684. q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
  5685. q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
  5686. q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
  5687. q5_0 = _mm_add_epi8(q5l_0, q5h_0);
  5688. q5_1 = _mm_add_epi8(q5l_1, q5h_1);
  5689. hmask = _mm_slli_epi16(hmask, 1);
  5690. q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5691. q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5692. __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
  5693. __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
  5694. p16_2 = _mm_madd_epi16(scale_1, p16_2);
  5695. p16_3 = _mm_madd_epi16(scale_1, p16_3);
  5696. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  5697. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  5698. }
  5699. __m256 vd = _mm256_set1_ps(d);
  5700. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  5701. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
  5702. }
  5703. *s = hsum_float_8(acc) + summs;
  5704. #elif defined __riscv_v_intrinsic
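// RISC-V vector: the mins term is subtracted up front as in q4_K; inside the loop the high bit is
// applied with a mask register (vmsne + masked add of 16) instead of an OR, then widening multiplies
// against q8 and the per-group scales feed one reduction per 32-quant group.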
  5705. const uint8_t * scales = (const uint8_t*)&utmp[0];
  5706. const uint8_t * mins = (const uint8_t*)&utmp[2];
  5707. float sumf = 0;
  5708. float sums = 0.0;
  5709. size_t vl;
  5710. for (int i = 0; i < nb; ++i) {
  5711. vl = 8;
  5712. const uint8_t * restrict q5 = x[i].qs;
  5713. const uint8_t * restrict hm = x[i].qh;
  5714. const int8_t * restrict q8 = y[i].qs;
  5715. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5716. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  5717. vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
  5718. vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
  5719. vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
  5720. memcpy(utmp, x[i].scales, 12);
  5721. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5722. const uint32_t uaux = utmp[1] & kmask1;
  5723. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5724. utmp[2] = uaux;
  5725. utmp[0] &= kmask1;
  5726. vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
  5727. vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
  5728. vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
  5729. vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  5730. sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
  5731. vl = 32;
  5732. int32_t aux32 = 0;
  5733. int is = 0;
  5734. uint8_t m = 1;
  5735. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5736. vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);
  5737. for (int j = 0; j < QK_K/64; ++j) {
  5738. // load Q5 and Q8
  5739. vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
  5740. vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
  5741. vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);
  5742. // compute mask for addition
  5743. vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
  5744. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5745. vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
  5746. vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
  5747. m <<= 1;
  5748. vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
  5749. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5750. vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
  5751. vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
  5752. m <<= 1;
  5753. vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
  5754. vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);
  5755. vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
  5756. vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);
  5757. vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
  5758. vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);
  5759. aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
  5760. q5 += 32; q8 += 64;
  5761. }
  5762. vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
  5763. sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
  5764. }
  5765. *s = sumf+sums;
  5766. #else
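// Scalar reference: expand the 4-bit quants into aux8, add 16 where the corresponding qh bit is set,
// then accumulate exactly like the q4_K scalar path.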
  5767. const uint8_t * scales = (const uint8_t*)&utmp[0];
  5768. const uint8_t * mins = (const uint8_t*)&utmp[2];
  5769. int8_t aux8[QK_K];
  5770. int16_t aux16[8];
  5771. float sums [8];
  5772. int32_t aux32[8];
  5773. memset(sums, 0, 8*sizeof(float));
  5774. float sumf = 0;
  5775. for (int i = 0; i < nb; ++i) {
  5776. const uint8_t * restrict q4 = x[i].qs;
  5777. const uint8_t * restrict hm = x[i].qh;
  5778. const int8_t * restrict q8 = y[i].qs;
  5779. memset(aux32, 0, 8*sizeof(int32_t));
  5780. int8_t * restrict a = aux8;
  5781. uint8_t m = 1;
  5782. for (int j = 0; j < QK_K/64; ++j) {
  5783. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
  5784. for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
  5785. a += 32; m <<= 1;
  5786. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
  5787. for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
  5788. a += 32; m <<= 1;
  5789. q4 += 32;
  5790. }
  5791. memcpy(utmp, x[i].scales, 12);
  5792. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5793. const uint32_t uaux = utmp[1] & kmask1;
  5794. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5795. utmp[2] = uaux;
  5796. utmp[0] &= kmask1;
  5797. int sumi = 0;
  5798. for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
  5799. a = aux8;
  5800. int is = 0;
  5801. for (int j = 0; j < QK_K/32; ++j) {
  5802. int32_t scale = scales[is++];
  5803. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5804. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5805. q8 += 8; a += 8;
  5806. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5807. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5808. q8 += 8; a += 8;
  5809. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5810. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5811. q8 += 8; a += 8;
  5812. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5813. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5814. q8 += 8; a += 8;
  5815. }
  5816. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5817. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5818. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  5819. sumf -= dmin * sumi;
  5820. }
  5821. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5822. *s = sumf;
  5823. #endif
  5824. }
  5825. #else
  5826. void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  5827. assert(n % QK_K == 0);
  5828. const block_q5_K * restrict x = vx;
  5829. const block_q8_K * restrict y = vy;
  5830. const int nb = n / QK_K;
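// QK_K == 64 variant: a single fp16 scale x[i].d, signed per-16 scales in x[i].scales and 8 bytes of qh
// carrying the 5th bits. The branches below rebuild each value as (low nibble) - 16 when its qh bit is
// clear, i.e. the 5-bit value minus 16, so no separate mins term is needed.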
  5831. #ifdef __ARM_NEON
  5832. const uint8x16_t m4b = vdupq_n_u8(0xf);
  5833. const uint8x16_t mh = vdupq_n_u8(16);
  5834. const int32x4_t mzero = vdupq_n_s32(0);
  5835. ggml_int8x16x4_t q5bytes;
  5836. ggml_uint8x16x4_t q5h;
  5837. float sumf = 0;
  5838. for (int i = 0; i < nb; ++i) {
  5839. const float d = y[i].d * (float)x[i].d;
  5840. const int8_t * sc = x[i].scales;
  5841. const uint8_t * restrict q5 = x[i].qs;
  5842. const uint8_t * restrict qh = x[i].qh;
  5843. const int8_t * restrict q8 = y[i].qs;
  5844. const uint8x8_t qhbits = vld1_u8(qh);
  5845. const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
  5846. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  5847. const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
  5848. q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
  5849. q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
  5850. q5h.val[2] = vbicq_u8(mh, htmp);
  5851. q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
  5852. q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
  5853. q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
  5854. q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
  5855. q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
  5856. int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
  5857. int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
  5858. int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
  5859. int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
  5860. sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
  5861. }
  5862. *s = sumf;
  5863. #elif defined __AVX2__
  5864. const __m256i m4 = _mm256_set1_epi8(0xF);
  5865. const __m256i mone = _mm256_set1_epi8(1);
  5866. __m256 acc = _mm256_setzero_ps();
  5867. for (int i = 0; i < nb; ++i) {
  5868. const uint8_t * restrict q5 = x[i].qs;
  5869. const int8_t * restrict q8 = y[i].qs;
  5870. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5871. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
  5872. const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
  5873. const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
  5874. int64_t aux64;
  5875. memcpy(&aux64, x[i].qh, 8);
  5876. const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
  5877. const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
  5878. const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
  5879. const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
  5880. const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
  5881. const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
  5882. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5883. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  5884. const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
  5885. const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
  5886. const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
  5887. const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
  5888. const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
  5889. acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
  5890. }
  5891. *s = hsum_float_8(acc);
  5892. #elif defined __AVX__
  5893. const __m128i m4 = _mm_set1_epi8(0xF);
  5894. const __m128i mone = _mm_set1_epi8(1);
  5895. __m256 acc = _mm256_setzero_ps();
  5896. for (int i = 0; i < nb; ++i) {
  5897. const uint8_t * restrict q5 = x[i].qs;
  5898. const int8_t * restrict q8 = y[i].qs;
  5899. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5900. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
  5901. const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
  5902. const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
  5903. const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
  5904. const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
  5905. int64_t aux64;
  5906. memcpy(&aux64, x[i].qh, 8);
  5907. const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
  5908. const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
  5909. const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
  5910. const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
  5911. const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
  5912. const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
  5913. const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
  5914. const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
  5915. const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
  5916. const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
  5917. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5918. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  5919. const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
  5920. const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
  5921. const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
  5922. const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
  5923. const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
  5924. const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
  5925. const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
  5926. const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
  5927. const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
  5928. const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
  5929. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
  5930. }
  5931. *s = hsum_float_8(acc);
  5932. #elif defined __riscv_v_intrinsic
  5933. float sumf = 0;
  5934. for (int i = 0; i < nb; ++i) {
  5935. const float d = y[i].d * (float)x[i].d;
  5936. const int8_t * sc = x[i].scales;
  5937. const uint8_t * restrict q5 = x[i].qs;
  5938. const uint8_t * restrict qh = x[i].qh;
  5939. const int8_t * restrict q8 = y[i].qs;
  5940. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5941. // load qh
  5942. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
  5943. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  5944. size_t vl = 16;
  5945. // combine both qh_1 and qh_2
  5946. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
  5947. vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
  5948. vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
  5949. vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
  5950. vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
  5951. vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
  5952. vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
  5953. vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
  5954. vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);
  5955. // load q5
  5956. vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
  5957. vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);
  5958. vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
  5959. vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
  5960. vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
  5961. vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));
  5962. vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
  5963. vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
  5964. vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
  5965. vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);
  5966. // load Q8 and multiply it with Q5
  5967. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  5968. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  5969. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  5970. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  5971. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  5972. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  5973. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  5974. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  5975. int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
  5976. int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
  5977. int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
  5978. int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);
  5979. sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
  5980. }
  5981. *s = sumf;
  5982. #else
  5983. int8_t aux8[QK_K];
  5984. int16_t aux16[16];
  5985. float sums [8];
  5986. memset(sums, 0, 8*sizeof(float));
  5987. float sumf = 0;
  5988. for (int i = 0; i < nb; ++i) {
  5989. const uint8_t * restrict q4 = x[i].qs;
  5990. const uint8_t * restrict hm = x[i].qh;
  5991. const int8_t * restrict q8 = y[i].qs;
  5992. int8_t * restrict a = aux8;
  5993. for (int l = 0; l < 32; ++l) {
  5994. a[l+ 0] = q4[l] & 0xF;
  5995. a[l+32] = q4[l] >> 4;
  5996. }
  5997. for (int is = 0; is < 8; ++is) {
  5998. uint8_t m = 1 << is;
  5999. for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
  6000. }
  6001. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6002. const int8_t * restrict sc = x[i].scales;
  6003. for (int j = 0; j < QK_K/16; ++j) {
  6004. const float dl = d * sc[j];
  6005. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  6006. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
  6007. q8 += 16; a += 16;
  6008. }
  6009. }
  6010. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6011. *s = sumf;
  6012. #endif
  6013. }
  6014. #endif
  6015. #if QK_K == 256
  6016. void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  6017. assert(n % QK_K == 0);
  6018. const block_q6_K * restrict x = vx;
  6019. const block_q8_K * restrict y = vy;
  6020. const int nb = n / QK_K;
  6021. #ifdef __ARM_NEON
  6022. float sum = 0;
  6023. const uint8x16_t m4b = vdupq_n_u8(0xF);
  6024. const int32x4_t vzero = vdupq_n_s32(0);
  6025. //const int8x16_t m32s = vdupq_n_s8(32);
  6026. const uint8x16_t mone = vdupq_n_u8(3);
  6027. ggml_int8x16x4_t q6bytes;
  6028. ggml_uint8x16x4_t q6h;
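// q6_K: ql holds the low 4 bits and qh the top 2 bits of each 6-bit quant (values 0..63 with an
// implicit -32 offset). Instead of subtracting 32 per element (the commented-out m32s variant below),
// the offset is folded out once per super-block: isum_mins = sum(bsums * scales) and the final result
// uses (isum - 32 * isum_mins).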
  6029. for (int i = 0; i < nb; ++i) {
  6030. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  6031. const uint8_t * restrict q6 = x[i].ql;
  6032. const uint8_t * restrict qh = x[i].qh;
  6033. const int8_t * restrict q8 = y[i].qs;
  6034. const int8_t * restrict scale = x[i].scales;
  6035. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  6036. const int8x16_t scales = vld1q_s8(scale);
  6037. const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
  6038. const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
  6039. vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
  6040. vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
  6041. vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
  6042. int32_t isum_mins = vaddvq_s32(prod);
  6043. int32_t isum = 0;
  6044. for (int j = 0; j < QK_K/128; ++j) {
  6045. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
  6046. ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
  6047. ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6048. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
  6049. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
  6050. uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
  6051. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6052. shifted = vshrq_n_u8(qhbits.val[1], 2);
  6053. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6054. //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
  6055. //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
  6056. //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
  6057. //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
  6058. q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
  6059. q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
  6060. q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
  6061. q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
  6062. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6063. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6064. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6065. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6066. scale += 4;
  6067. q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6068. shifted = vshrq_n_u8(qhbits.val[0], 4);
  6069. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6070. shifted = vshrq_n_u8(qhbits.val[1], 4);
  6071. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6072. shifted = vshrq_n_u8(qhbits.val[0], 6);
  6073. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6074. shifted = vshrq_n_u8(qhbits.val[1], 6);
  6075. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6076. //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
  6077. //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
  6078. //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
  6079. //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
  6080. q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
  6081. q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
  6082. q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
  6083. q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
  6084. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6085. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6086. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6087. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6088. scale += 4;
  6089. }
  6090. //sum += isum * d_all * y[i].d;
  6091. sum += d_all * y[i].d * (isum - 32 * isum_mins);
  6092. }
  6093. *s = sum;
  6094. #elif defined __AVX2__
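// AVX2 path: here the -32 offset is removed per partial product instead: q8s_* = maddubs(32, q8) is
// subtracted from each 16-bit product before the signed per-16 scales are applied with madd.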
  6095. const __m256i m4 = _mm256_set1_epi8(0xF);
  6096. const __m256i m2 = _mm256_set1_epi8(3);
  6097. const __m256i m32s = _mm256_set1_epi8(32);
  6098. __m256 acc = _mm256_setzero_ps();
  6099. for (int i = 0; i < nb; ++i) {
  6100. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6101. const uint8_t * restrict q4 = x[i].ql;
  6102. const uint8_t * restrict qh = x[i].qh;
  6103. const int8_t * restrict q8 = y[i].qs;
  6104. const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  6105. __m256i sumi = _mm256_setzero_si256();
  6106. int is = 0;
  6107. for (int j = 0; j < QK_K/128; ++j) {
  6108. const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
  6109. const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
  6110. const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
  6111. const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
  6112. is += 4;
  6113. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  6114. const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  6115. const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
  6116. const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
  6117. const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
  6118. const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
  6119. const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
  6120. const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
  6121. const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
  6122. const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
  6123. const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
  6124. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6125. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6126. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6127. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6128. __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
  6129. __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
  6130. __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
  6131. __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
  6132. __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
  6133. __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
  6134. __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
  6135. __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
  6136. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  6137. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  6138. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  6139. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
  6140. p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
  6141. p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
  6142. p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
  6143. p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
  6144. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  6145. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
  6146. }
  6147. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  6148. }
  6149. *s = hsum_float_8(acc);
  6150. #elif defined __AVX__
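// AVX: 128-bit version of the same scheme; each 128-quant chunk is processed as eight 16-byte groups,
// with the scale bytes picked out of x[i].scales by the running shuffle index.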
  6151. const __m128i m4 = _mm_set1_epi8(0xF);
  6152. const __m128i m3 = _mm_set1_epi8(3);
  6153. const __m128i m32s = _mm_set1_epi8(32);
  6154. const __m128i m2 = _mm_set1_epi8(2);
  6155. __m256 acc = _mm256_setzero_ps();
  6156. for (int i = 0; i < nb; ++i) {
  6157. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6158. const uint8_t * restrict q4 = x[i].ql;
  6159. const uint8_t * restrict qh = x[i].qh;
  6160. const int8_t * restrict q8 = y[i].qs;
  6161. const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  6162. __m128i sumi_0 = _mm_setzero_si128();
  6163. __m128i sumi_1 = _mm_setzero_si128();
  6164. __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  6165. for (int j = 0; j < QK_K/128; ++j) {
  6166. const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
  6167. const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
  6168. const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
  6169. const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
  6170. const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
  6171. const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
  6172. const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
  6173. const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
  6174. const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
  6175. const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
  6176. const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  6177. const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  6178. const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  6179. const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  6180. const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
  6181. const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
  6182. const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
  6183. const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
  6184. const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
  6185. const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
  6186. const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
  6187. const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
  6188. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6189. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6190. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6191. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6192. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6193. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6194. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6195. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6196. __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
  6197. __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
  6198. __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
  6199. __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
  6200. __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
  6201. __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
  6202. __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
  6203. __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
  6204. __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
  6205. __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
  6206. __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
  6207. __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
  6208. __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
  6209. __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
  6210. __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
  6211. __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
  6212. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  6213. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  6214. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  6215. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  6216. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  6217. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  6218. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  6219. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  6220. const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
  6221. shuffle = _mm_add_epi8(shuffle, m2);
  6222. const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
  6223. shuffle = _mm_add_epi8(shuffle, m2);
  6224. const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
  6225. shuffle = _mm_add_epi8(shuffle, m2);
  6226. const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
  6227. shuffle = _mm_add_epi8(shuffle, m2);
  6228. p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
  6229. p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
  6230. p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
  6231. p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
  6232. p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
  6233. p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
  6234. p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
  6235. p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
  6236. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  6237. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  6238. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
  6239. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
  6240. }
  6241. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  6242. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  6243. }
  6244. *s = hsum_float_8(acc);
  6245. #elif defined __riscv_v_intrinsic
  6246. float sumf = 0;
  6247. for (int i = 0; i < nb; ++i) {
  6248. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6249. const uint8_t * restrict q6 = x[i].ql;
  6250. const uint8_t * restrict qh = x[i].qh;
  6251. const int8_t * restrict q8 = y[i].qs;
  6252. const int8_t * restrict scale = x[i].scales;
  6253. size_t vl;
  6254. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  6255. int sum_t = 0;
  6256. int is = 0;
  6257. for (int j = 0; j < QK_K/128; ++j) {
  6258. vl = 32;
  6259. // load qh
  6260. vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);
  6261. // load Q6
  6262. vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
  6263. vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);
  6264. vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
  6265. vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
  6266. vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
  6267. vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);
  6268. vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
  6269. vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
  6270. vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
  6271. vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);
  6272. vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
  6273. vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
  6274. vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
  6275. vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);
  6276. vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
  6277. vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
  6278. vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
  6279. vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);
  6280. // load Q8 and take product
  6281. vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
  6282. vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  6283. vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  6284. vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  6285. vl = 16;
  6286. vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
  6287. vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
  6288. vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
  6289. vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
  6290. vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
  6291. vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
  6292. vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
  6293. vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);
  6294. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
  6295. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
  6296. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
  6297. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);
  6298. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
  6299. q6 += 64; qh += 32; q8 += 128; is=8;
  6300. }
  6301. sumf += d * sum_t;
  6302. }
  6303. *s = sumf;
#else

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
                a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
                a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
            }
            a  += 128;
            q4 += 64;
            qh += 32;
        }
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
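// Note on the AVX2/AVX paths above (illustrative, not part of the kernel):
// _mm256_maddubs_epi16 / _mm_maddubs_epi16 need their first operand to be
// unsigned, so the 6-bit values are kept in [0, 63] and the "- 32" offset is
// applied afterwards via
//
//     sum_j (q[j] - 32) * y[j]  =  sum_j q[j]*y[j]  -  32 * sum_j y[j]
//
// which is why each p16 accumulator has maddubs(m32s, q8) subtracted from it.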
  6346. #else
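// The branch below handles the small super-block build (each block holds 64
// quants), so a single pass over ql, qh and the four scales covers the block.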
  6347. void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  6348. assert(n % QK_K == 0);
  6349. const block_q6_K * restrict x = vx;
  6350. const block_q8_K * restrict y = vy;
  6351. const int nb = n / QK_K;
  6352. #ifdef __ARM_NEON
  6353. float sum = 0;
  6354. const uint8x16_t m4b = vdupq_n_u8(0xF);
  6355. const int8x16_t m32s = vdupq_n_s8(32);
  6356. const int32x4_t vzero = vdupq_n_s32(0);
  6357. const uint8x16_t mone = vdupq_n_u8(3);
  6358. ggml_int8x16x4_t q6bytes;
  6359. ggml_uint8x16x4_t q6h;
  6360. for (int i = 0; i < nb; ++i) {
  6361. const float d_all = (float)x[i].d;
  6362. const uint8_t * restrict q6 = x[i].ql;
  6363. const uint8_t * restrict qh = x[i].qh;
  6364. const int8_t * restrict q8 = y[i].qs;
  6365. const int8_t * restrict scale = x[i].scales;
  6366. int32_t isum = 0;
  6367. uint8x16_t qhbits = vld1q_u8(qh);
  6368. ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
  6369. ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  6370. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
  6371. uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
  6372. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6373. shifted = vshrq_n_u8(qhbits, 4);
  6374. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6375. shifted = vshrq_n_u8(qhbits, 6);
  6376. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6377. q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
  6378. q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
  6379. q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
  6380. q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
  6381. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6382. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6383. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6384. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6385. sum += isum * d_all * y[i].d;
  6386. }
  6387. *s = sum;
  6388. #elif defined __AVX2__
  6389. const __m256i m4 = _mm256_set1_epi8(0xF);
  6390. const __m256i m2 = _mm256_set1_epi8(3);
  6391. const __m256i m32s = _mm256_set1_epi8(32);
  6392. __m256 acc = _mm256_setzero_ps();
  6393. for (int i = 0; i < nb; ++i) {
  6394. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6395. const uint8_t * restrict q4 = x[i].ql;
  6396. const uint8_t * restrict qh = x[i].qh;
  6397. const int8_t * restrict q8 = y[i].qs;
  6398. const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
  6399. const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
  6400. const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
  6401. const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
  6402. __m256i sumi = _mm256_setzero_si256();
  6403. const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
  6404. const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
  6405. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
  6406. const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
  6407. const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
  6408. const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
  6409. const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
  6410. const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
  6411. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6412. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6413. __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
  6414. __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
  6415. __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
  6416. __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
  6417. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  6418. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  6419. p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
  6420. p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
  6421. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  6422. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  6423. }
  6424. *s = hsum_float_8(acc);
  6425. #elif defined __AVX__
  6426. const __m128i m4 = _mm_set1_epi8(0xF);
  6427. const __m128i m2 = _mm_set1_epi8(3);
  6428. const __m128i m32s = _mm_set1_epi8(32);
  6429. __m256 acc = _mm256_setzero_ps();
  6430. for (int i = 0; i < nb; ++i) {
  6431. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6432. const uint8_t * restrict q4 = x[i].ql;
  6433. const uint8_t * restrict qh = x[i].qh;
  6434. const int8_t * restrict q8 = y[i].qs;
  6435. const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
  6436. const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
  6437. const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
  6438. const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
  6439. __m128i sumi_0 = _mm_setzero_si128();
  6440. __m128i sumi_1 = _mm_setzero_si128();
  6441. const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
  6442. const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
  6443. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
  6444. const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
  6445. const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
  6446. const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
  6447. const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
  6448. const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
  6449. const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
  6450. const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
  6451. const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
  6452. const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
  6453. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6454. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6455. __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
  6456. __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
  6457. __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
  6458. __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
  6459. __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
  6460. __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
  6461. __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
  6462. __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
  6463. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  6464. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  6465. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  6466. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  6467. p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
  6468. p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
  6469. p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
  6470. p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
  6471. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  6472. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  6473. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
  6474. }
  6475. *s = hsum_float_8(acc);
  6476. #elif defined __riscv_v_intrinsic
  6477. float sumf = 0;
  6478. for (int i = 0; i < nb; ++i) {
  6479. const float d_all = (float)x[i].d;
  6480. const uint8_t * restrict q6 = x[i].ql;
  6481. const uint8_t * restrict qh = x[i].qh;
  6482. const int8_t * restrict q8 = y[i].qs;
  6483. const int8_t * restrict scale = x[i].scales;
  6484. int32_t isum = 0;
  6485. size_t vl = 16;
  6486. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  6487. // load Q6
  6488. vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
  6489. vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);
  6490. // load qh
  6491. vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);
  6492. vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  6493. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  6494. vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  6495. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  6496. vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  6497. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  6498. vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  6499. vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
  6500. vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
  6501. vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
  6502. vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);
  6503. vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
  6504. vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
  6505. vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
  6506. vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);
  6507. // load Q8 and take product
  6508. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  6509. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  6510. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  6511. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  6512. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  6513. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  6514. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  6515. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  6516. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
  6517. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
  6518. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
  6519. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];
  6520. sumf += isum * d_all * y[i].d;
  6521. }
  6522. *s = sumf;
#else

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int l = 0; l < 16; ++l) {
            a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
            a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
            a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
            a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
        }
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
#endif
  6560. static const int8_t keven_signs_q2xs[1024] = {
  6561. 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
  6562. 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
  6563. 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
  6564. 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
  6565. 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
  6566. 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
  6567. 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
  6568. 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
  6569. 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
  6570. 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
  6571. 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
  6572. 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
  6573. 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
  6574. 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
  6575. 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
  6576. 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
  6577. 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
  6578. 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
  6579. 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
  6580. 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
  6581. 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
  6582. 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
  6583. 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
  6584. 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
  6585. 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
  6586. 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
  6587. 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
  6588. 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
  6589. 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
  6590. 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
  6591. 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
  6592. 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
  6593. };
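// The table above holds the 128 "even" sign patterns shared by the IQ2/IQ3
// kernels: 7 explicit sign bits per group of 8 values, with the 8th sign
// chosen so that every row contains an even number of -1s. A sketch of how
// such a table could be generated (illustrative only; the constant above is
// what is actually used):
//
//     static int8_t signs[1024];
//     for (int k = 0; k < 128; ++k) {
//         int parity = 0;
//         for (int j = 0; j < 7; ++j) {
//             int neg = (k >> j) & 1;           // bit j set -> value j is negated
//             parity ^= neg;
//             signs[8*k + j] = neg ? -1 : 1;
//         }
//         signs[8*k + 7] = parity ? -1 : 1;     // keep the count of -1s even
//     }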
  6594. void ggml_vec_dot_iq2_xxs_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  6595. assert(n % QK_K == 0);
  6596. const block_iq2_xxs * restrict x = vx;
  6597. const block_q8_K * restrict y = vy;
  6598. const int nb = n / QK_K;
  6599. #if defined(__ARM_NEON)
  6600. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  6601. uint32_t aux32[4];
  6602. const uint8_t * aux8 = (const uint8_t *)aux32;
  6603. ggml_int8x16x4_t q2u;
  6604. ggml_int8x16x4_t q2s;
  6605. ggml_int8x16x4_t q8b;
  6606. float sumf = 0;
  6607. for (int i = 0; i < nb; ++i) {
  6608. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6609. const uint16_t * restrict q2 = x[i].qs;
  6610. const int8_t * restrict q8 = y[i].qs;
  6611. float sumf1 = 0, sumf2 = 0;
  6612. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  6613. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  6614. memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
  6615. q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
  6616. q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
  6617. q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
  6618. q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
  6619. q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  6620. q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  6621. q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
  6622. q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
  6623. q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
  6624. q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
  6625. q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
  6626. q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
  6627. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
  6628. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
  6629. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
  6630. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
  6631. }
  6632. sumf += d*(sumf1 + sumf2);
  6633. }
  6634. *s = 0.25f * sumf;
  6635. #elif defined(__AVX2__)
  6636. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  6637. uint32_t aux32[4];
  6638. const uint8_t * aux8 = (const uint8_t *)aux32;
  6639. __m256 accumf = _mm256_setzero_ps();
  6640. for (int i = 0; i < nb; ++i) {
  6641. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6642. const uint16_t * restrict q2 = x[i].qs;
  6643. const int8_t * restrict q8 = y[i].qs;
  6644. __m256i sumi1 = _mm256_setzero_si256();
  6645. __m256i sumi2 = _mm256_setzero_si256();
  6646. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  6647. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6648. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6649. memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
  6650. const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
  6651. const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
  6652. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  6653. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  6654. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
  6655. signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
  6656. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  6657. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  6658. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  6659. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  6660. const uint16_t ls1 = aux32[1] >> 28;
  6661. const uint16_t ls2 = aux32[3] >> 28;
  6662. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  6663. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  6664. sumi1 = _mm256_add_epi32(sumi1, p1);
  6665. sumi2 = _mm256_add_epi32(sumi2, p2);
  6666. }
  6667. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  6668. }
  6669. *s = 0.125f * hsum_float_8(accumf);
#else

    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(aux32, q2, 2*sizeof(uint32_t));
            q2 += 4;
            const uint32_t ls = 2*(aux32[1] >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
                const uint8_t  signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}
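// Layout notes for ggml_vec_dot_iq2_xxs_q8_K above (see the scalar fallback):
// every group of 32 quants is described by two 32-bit words copied out of
// x[i].qs. The first word holds four byte-sized indices into iq2xxs_grid
// (8 values each); the second packs four 7-bit sign patterns in its low 28
// bits and a 4-bit scale s in its top nibble. The effective sub-block scale
// is d * (2*s + 1) / 8, which is where the trailing 0.125f (or 0.5f + s
// combined with 0.25f on NEON) comes from; e.g. a stored nibble s = 3 weights
// that sub-block by d * 7 / 8.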
  6699. void ggml_vec_dot_iq2_xs_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  6700. assert(n % QK_K == 0);
  6701. const block_iq2_xs * restrict x = vx;
  6702. const block_q8_K * restrict y = vy;
  6703. const int nb = n / QK_K;
  6704. #if defined(__ARM_NEON)
  6705. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  6706. ggml_int8x16x4_t q2u;
  6707. ggml_int8x16x4_t q2s;
  6708. ggml_int8x16x4_t q8b;
  6709. int32x4x4_t scales32;
  6710. float sumf = 0;
  6711. for (int i = 0; i < nb; ++i) {
  6712. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6713. const uint16_t * restrict q2 = x[i].qs;
  6714. const int8_t * restrict q8 = y[i].qs;
  6715. const uint8x8_t scales8 = vld1_u8(x[i].scales);
  6716. const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
  6717. const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
  6718. uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
  6719. scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
  6720. const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
  6721. const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
  6722. scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
  6723. scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
  6724. scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
  6725. scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
  6726. int32x4_t sumi = vdupq_n_s32(0);
  6727. for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
  6728. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  6729. q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
  6730. q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
  6731. q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
  6732. q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
  6733. q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
  6734. q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
  6735. q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
  6736. q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
  6737. q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
  6738. q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
  6739. q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
  6740. q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
  6741. const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
  6742. const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
  6743. const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
  6744. const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
  6745. const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
  6746. sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
  6747. q2 += 8;
  6748. }
  6749. sumf += d*vaddvq_s32(sumi);
  6750. }
  6751. *s = 0.125f * sumf;
  6752. #elif defined(__AVX2__)
  6753. const __m128i m4 = _mm_set1_epi8(0xf);
  6754. const __m128i m1 = _mm_set1_epi8(1);
  6755. const __m256i m511 = _mm256_set1_epi16(511);
  6756. const __m256i mone = _mm256_set1_epi8(1);
  6757. static const uint8_t k_bit_helper[32] = {
  6758. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  6759. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  6760. };
  6761. static const char block_sign_shuffle_mask_1[32] = {
  6762. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
  6763. 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
  6764. };
  6765. static const char block_sign_shuffle_mask_2[32] = {
  6766. 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
  6767. 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
  6768. };
  6769. static const uint8_t bit_selector_mask_bytes[32] = {
  6770. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  6771. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  6772. };
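    // How the helper tables above are used in the loop below: each 16-bit
    // element of q2 carries a 9-bit grid index (bits 0..8) and 7 explicit sign
    // bits (bits 9..15); the 8th sign is the parity of those 7. XOR-ing
    // (q2 >> 9) with (q2 >> 13) folds the 7 sign bits into a nibble with the
    // same parity, and k_bit_helper maps every odd-popcount nibble to 0x80,
    // which is OR-ed back in as the missing top sign bit. The two
    // block_sign_shuffle masks then broadcast each group's sign byte across 8
    // lanes, and bit_selector_mask isolates one bit per lane so that
    // _mm256_sign_epi8 can apply all 8 signs at once.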
  6773. const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
  6774. const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
  6775. const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
  6776. const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);
  6777. uint64_t aux64;
  6778. // somewhat hacky, but gives a significant boost in performance
  6779. __m256i aux_gindex;
  6780. const uint16_t * gindex = (const uint16_t *)&aux_gindex;
  6781. __m256 accumf = _mm256_setzero_ps();
  6782. for (int i = 0; i < nb; ++i) {
  6783. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6784. const uint16_t * restrict q2 = x[i].qs;
  6785. const int8_t * restrict q8 = y[i].qs;
  6786. memcpy(&aux64, x[i].scales, 8);
  6787. __m128i stmp = _mm_set1_epi64x(aux64);
  6788. stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
  6789. const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);
  6790. __m256i sumi1 = _mm256_setzero_si256();
  6791. __m256i sumi2 = _mm256_setzero_si256();
  6792. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {
  6793. const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16;
  6794. aux_gindex = _mm256_and_si256(q2_data, m511);
  6795. const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
  6796. const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
  6797. const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);
  6798. const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
  6799. const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);
  6800. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6801. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6802. const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6803. const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6804. const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
  6805. iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
  6806. const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
  6807. iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
  6808. const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
  6809. iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
  6810. const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
  6811. iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);
  6812. const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
  6813. const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
  6814. const __m256i full_signs_1 = _mm256_set_m128i(full_signs_l, full_signs_l);
  6815. const __m256i full_signs_2 = _mm256_set_m128i(full_signs_h, full_signs_h);
  6816. __m256i signs;
  6817. signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
  6818. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  6819. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
  6820. signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
  6821. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  6822. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
  6823. signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
  6824. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  6825. const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));
  6826. signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
  6827. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  6828. const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));
  6829. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  6830. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  6831. const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
  6832. const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);
  6833. const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
  6834. const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
  6835. const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
  6836. const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));
  6837. sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
  6838. sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
  6839. sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
  6840. sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
  6841. }
  6842. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  6843. }
  6844. *s = 0.125f * hsum_float_8(accumf);
#else

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const uint8_t  * restrict sc = x[i].scales;
        const int8_t   * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
            const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t  signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t  signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls2;
            q2 += 4;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}
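// Layout notes for ggml_vec_dot_iq2_xs_q8_K above: each uint16_t of x[i].qs
// encodes one group of 8 quants, and x[i].scales packs two 4-bit sub-block
// scales per byte. A minimal scalar decode of one group, mirroring the
// fallback path, looks like:
//
//     const uint16_t w     = q2[l];
//     const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (w & 511)); // 8 grid values
//     const uint8_t  signs = ksigns_iq2xs[w >> 9];                      // 7 sign bits + implied parity bit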
  6883. // TODO
  6884. void ggml_vec_dot_iq3_xxs_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  6885. assert(n % QK_K == 0);
  6886. const block_iq3_xxs * restrict x = vx;
  6887. const block_q8_K * restrict y = vy;
  6888. const int nb = n / QK_K;
  6889. #if defined(__ARM_NEON)
  6890. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  6891. uint32_t aux32[2];
  6892. ggml_int8x16x4_t q3s;
  6893. ggml_int8x16x4_t q8b;
  6894. float sumf = 0;
  6895. for (int i = 0; i < nb; ++i) {
  6896. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6897. const uint8_t * restrict q3 = x[i].qs;
  6898. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  6899. const int8_t * restrict q8 = y[i].qs;
  6900. float sumf1 = 0, sumf2 = 0;
  6901. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  6902. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  6903. memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
  6904. const uint32x4_t aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]};
  6905. const uint32x4_t aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]};
  6906. const uint32x4_t aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]};
  6907. const uint32x4_t aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]};
  6908. q3 += 16;
  6909. q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127))));
  6910. q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
  6911. q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  6912. q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  6913. q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
  6914. q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
  6915. q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
  6916. q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
  6917. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  6918. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  6919. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
  6920. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
  6921. }
  6922. sumf += d*(sumf1 + sumf2);
  6923. }
  6924. *s = 0.5f * sumf;
  6925. #elif defined(__AVX2__)
  6926. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  6927. uint32_t aux32[2];
  6928. __m256 accumf = _mm256_setzero_ps();
  6929. for (int i = 0; i < nb; ++i) {
  6930. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6931. const uint8_t * restrict q3 = x[i].qs;
  6932. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  6933. const int8_t * restrict q8 = y[i].qs;
  6934. __m256i sumi1 = _mm256_setzero_si256();
  6935. __m256i sumi2 = _mm256_setzero_si256();
  6936. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  6937. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6938. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  6939. const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  6940. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  6941. q3 += 8;
  6942. const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  6943. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  6944. q3 += 8;
  6945. memcpy(aux32, gas, 8); gas += 8;
  6946. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127],
  6947. signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]);
  6948. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  6949. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  6950. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  6951. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  6952. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  6953. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  6954. const uint16_t ls1 = aux32[0] >> 28;
  6955. const uint16_t ls2 = aux32[1] >> 28;
  6956. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  6957. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  6958. sumi1 = _mm256_add_epi32(sumi1, p1);
  6959. sumi2 = _mm256_add_epi32(sumi2, p2);
  6960. }
  6961. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  6962. }
  6963. *s = 0.25f * hsum_float_8(accumf);
#else

    uint32_t aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict gas = x[i].qs + QK_K/4;
        const int8_t  * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
            const uint32_t ls = 2*(aux32 >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
                const uint8_t  signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            q3 += 8;
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.25f * sumf;
#endif
}
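// Layout notes for ggml_vec_dot_iq3_xxs_q8_K above: x[i].qs starts with
// QK_K/4 byte-sized indices into iq3xxs_grid (each 32-bit grid entry supplies
// 4 values), followed at offset QK_K/4 by one 32-bit word per group of 32
// quants packing four 7-bit sign patterns and a 4-bit scale s, just as in the
// IQ2_XXS kernel; each group is weighted by 2*s + 1 and the final sum is
// scaled by 0.25f.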
// ================================ IQ2 quantization =============================================

typedef struct {
    uint64_t * grid;
    int      * map;
    uint16_t * neighbours;
} iq2_entry_t;
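// One entry per supported grid size (256 points for IQ2_XXS, 512 for IQ2_XS),
// filled lazily by iq2xs_init_impl() below:
//   grid       - the lattice points, 8 packed int8 coordinates per uint64_t
//   map        - for every possible 16-bit point code, either the grid index
//                (>= 0) or -(offset+1) into the neighbours array
//   neighbours - flattened neighbour lists, each prefixed by its length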
static iq2_entry_t iq2_data[2] = {
    {NULL, NULL, NULL},
    {NULL, NULL, NULL},
};
static inline int iq2_data_index(int grid_size) {
    GGML_ASSERT(grid_size == 256 || grid_size == 512);
    return grid_size == 256 ? 0 : 1;
}

static int iq2_compare_func(const void * left, const void * right) {
    const int * l = (const int *)left;
    const int * r = (const int *)right;
    return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
}
  7014. void iq2xs_init_impl(int grid_size) {
  7015. const int gindex = iq2_data_index(grid_size);
  7016. if (iq2_data[gindex].grid) {
  7017. return;
  7018. }
  7019. static const uint16_t kgrid_256[256] = {
  7020. 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97,
  7021. 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642,
  7022. 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288,
  7023. 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113,
  7024. 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240,
  7025. 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400,
  7026. 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260,
  7027. 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872,
  7028. 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
  7029. 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
  7030. 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
  7031. 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
  7032. 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
  7033. 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
  7034. 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
  7035. 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
  7036. };
  7037. static const uint16_t kgrid_512[512] = {
  7038. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  7039. 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257,
  7040. 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340,
  7041. 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597,
  7042. 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096,
  7043. 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348,
  7044. 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065,
  7045. 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441,
  7046. 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160,
  7047. 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372,
  7048. 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125,
  7049. 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652,
  7050. 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197,
  7051. 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549,
  7052. 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894,
  7053. 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
  7054. 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
  7055. 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
  7056. 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
  7057. 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
  7058. 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
  7059. 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
  7060. 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
  7061. 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
  7062. 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
  7063. 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
  7064. 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
  7065. 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
  7066. 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
  7067. 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
  7068. 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
  7069. 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
  7070. };
    const int kmap_size = 43692;
    const int nwant = 2;
    const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512;
    uint64_t * kgrid_q2xs;
    int      * kmap_q2xs;
    uint16_t * kneighbors_q2xs;

    printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
    uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
    for (int k = 0; k < grid_size; ++k) {
        int8_t * pos = (int8_t *)(the_grid + k);
        for (int i = 0; i < 8; ++i) {
            int l = (kgrid[k] >> 2*i) & 0x3;
            pos[i] = 2*l + 1;
        }
    }
    kgrid_q2xs = the_grid;
    iq2_data[gindex].grid = the_grid;
    kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
    iq2_data[gindex].map = kmap_q2xs;
    for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
    uint64_t aux64;
    uint8_t * aux8 = (uint8_t *)&aux64;
    for (int i = 0; i < grid_size; ++i) {
        aux64 = kgrid_q2xs[i];
        uint16_t index = 0;
        for (int k=0; k<8; ++k) {
            uint16_t q = (aux8[k] - 1)/2;
            index |= (q << 2*k);
        }
        kmap_q2xs[index] = i;
    }
    int8_t pos[8];
    int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
    int num_neighbors = 0, num_not_in_map = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q2xs[i] >= 0) continue;
        ++num_not_in_map;
        for (int k = 0; k < 8; ++k) {
            int l = (i >> 2*k) & 0x3;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
            int d2 = 0;
            for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
        int n = 0; int d2 = dist2[0];
        int nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            ++n;
        }
        num_neighbors += n;
    }
  7132. printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  7133. kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  7134. iq2_data[gindex].neighbours = kneighbors_q2xs;
  7135. int counter = 0;
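// Second pass: fill the neighbour table. Each off-grid index gets kmap_q2xs[i] = -(offset + 1), where
// offset points at its entry in kneighbors_q2xs: a count followed by that many grid indices.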
  7136. for (int i = 0; i < kmap_size; ++i) {
  7137. if (kmap_q2xs[i] >= 0) continue;
  7138. for (int k = 0; k < 8; ++k) {
  7139. int l = (i >> 2*k) & 0x3;
  7140. pos[k] = 2*l + 1;
  7141. }
  7142. for (int j = 0; j < grid_size; ++j) {
  7143. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  7144. int d2 = 0;
  7145. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  7146. dist2[2*j+0] = d2;
  7147. dist2[2*j+1] = j;
  7148. }
  7149. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  7150. kmap_q2xs[i] = -(counter + 1);
  7151. int d2 = dist2[0];
  7152. uint16_t * start = &kneighbors_q2xs[counter++];
  7153. int n = 0, nhave = 1;
  7154. for (int j = 0; j < grid_size; ++j) {
  7155. if (dist2[2*j] > d2) {
  7156. if (nhave == nwant) break;
  7157. d2 = dist2[2*j];
  7158. ++nhave;
  7159. }
  7160. kneighbors_q2xs[counter++] = dist2[2*j+1];
  7161. ++n;
  7162. }
  7163. *start = n;
  7164. }
  7165. free(dist2);
  7166. }
  7167. void iq2xs_free_impl(int grid_size) {
  7168. GGML_ASSERT(grid_size == 256 || grid_size == 512 || grid_size == 1024);
  7169. const int gindex = iq2_data_index(grid_size);
  7170. if (iq2_data[gindex].grid) {
  7171. free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL;
  7172. free(iq2_data[gindex].map); iq2_data[gindex].map = NULL;
  7173. free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
  7174. }
  7175. }
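// Return the stored neighbour (grid index) that minimizes the weighted squared error against scale*xval
// over the 8 lattice coordinates, and write its 2-bit levels to L.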
  7176. static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  7177. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  7178. int num_neighbors = neighbours[0];
  7179. GGML_ASSERT(num_neighbors > 0);
  7180. float best_d2 = FLT_MAX;
  7181. int grid_index = -1;
  7182. for (int j = 1; j <= num_neighbors; ++j) {
  7183. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  7184. float d2 = 0;
  7185. for (int i = 0; i < 8; ++i) {
  7186. float q = pg[i];
  7187. float diff = scale*q - xval[i];
  7188. d2 += weight[i]*diff*diff;
  7189. }
  7190. if (d2 < best_d2) {
  7191. best_d2 = d2; grid_index = neighbours[j];
  7192. }
  7193. }
  7194. GGML_ASSERT(grid_index >= 0);
  7195. const int8_t * pg = (const int8_t *)(grid + grid_index);
  7196. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  7197. return grid_index;
  7198. }
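// Quantize one row to iq2_xxs: each group of 8 values is matched (up to sign) against the 256-point,
// 8-dimensional grid built above; 7 sign bits per group and a 4-bit scale per block of 32 are stored
// alongside the 8-bit grid indices.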
  7199. static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  7200. const int gindex = iq2_data_index(256);
  7201. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  7202. const int * kmap_q2xs = iq2_data[gindex].map;
  7203. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  7204. GGML_ASSERT(quant_weights && "missing quantization weights");
  7205. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  7206. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  7207. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  7208. GGML_ASSERT(n%QK_K == 0);
  7209. const int kMaxQ = 3;
  7210. const int nbl = n/256;
  7211. block_iq2_xxs * y = vy;
  7212. float scales[QK_K/32];
  7213. float weight[32];
  7214. float xval[32];
  7215. int8_t L[32];
  7216. int8_t Laux[32];
  7217. float waux[32];
  7218. bool is_on_grid[4];
  7219. bool is_on_grid_aux[4];
  7220. uint8_t block_signs[4];
  7221. uint32_t q2[2*(QK_K/32)];
  7222. for (int ibl = 0; ibl < nbl; ++ibl) {
  7223. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  7224. memset(q2, 0, QK_K/4);
  7225. float max_scale = 0;
  7226. const float * xbl = x + QK_K*ibl;
  7227. float sumx2 = 0;
  7228. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  7229. float sigma2 = sumx2/QK_K;
  7230. for (int ib = 0; ib < QK_K/32; ++ib) {
  7231. const float * xb = xbl + 32*ib;
  7232. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  7233. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  7234. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
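// Work with absolute values and remember the signs. Only 7 sign bits are stored per group of 8, so if an
// odd number of signs was flipped, the entry with the smallest weighted contribution is flipped back to
// keep the sign parity even.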
  7235. for (int k = 0; k < 4; ++k) {
  7236. int nflip = 0;
  7237. uint8_t s = 0;
  7238. for (int i = 0; i < 8; ++i) {
  7239. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  7240. else {
  7241. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  7242. }
  7243. }
  7244. if (nflip%2) {
  7245. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  7246. for (int i = 1; i < 8; ++i) {
  7247. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  7248. if (ax < min) {
  7249. min = ax; imin = i;
  7250. }
  7251. }
  7252. xval[8*k+imin] = -xval[8*k+imin];
  7253. s ^= (1 << imin);
  7254. }
  7255. block_signs[k] = s & 127;
  7256. }
  7257. float max = xval[0];
  7258. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  7259. if (!max) {
  7260. scales[ib] = 0;
  7261. memset(L, 0, 32);
  7262. continue;
  7263. }
7264. float best = 0;
7265. float scale = max/(2*kMaxQ-1);
is_on_grid[0] = is_on_grid[1] = is_on_grid[2] = is_on_grid[3] = true; // initialize as in the iq2_xs variant below; otherwise these flags can be read uninitialized when no candidate scale improves on `best`
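// Scan candidate scales around max/(2*kMaxQ-1). For each candidate, round every group of 8 to its 2-bit
// levels, snap groups that fall off the grid to their best stored neighbour, and keep the scale that
// maximizes sumqx^2/sumq2 (the weighted least-squares objective).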
  7266. for (int is = -9; is <= 9; ++is) {
  7267. float id = (2*kMaxQ-1+is*0.1f)/max;
  7268. float this_scale = 1/id;
  7269. for (int k = 0; k < 4; ++k) {
  7270. for (int i = 0; i < 8; ++i) {
  7271. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  7272. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  7273. }
  7274. uint16_t u = 0;
  7275. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  7276. int grid_index = kmap_q2xs[u];
  7277. is_on_grid_aux[k] = true;
  7278. if (grid_index < 0) {
  7279. is_on_grid_aux[k] = false;
  7280. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  7281. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  7282. }
  7283. }
  7284. float sumqx = 0, sumq2 = 0;
  7285. for (int i = 0; i < 32; ++i) {
  7286. float w = weight[i];
  7287. float q = 2*Laux[i] + 1;
  7288. sumqx += w*xval[i]*q;
  7289. sumq2 += w*q*q;
  7290. }
  7291. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  7292. scale = sumqx/sumq2; best = scale*sumqx;
  7293. for (int i = 0; i < 32; ++i) L[i] = Laux[i];
  7294. for (int k = 0; k < 4; ++k) is_on_grid[k] = is_on_grid_aux[k];
  7295. }
  7296. }
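// Groups that still ended up off the grid with the chosen scale are re-snapped using that scale, and the
// scale is then refitted by weighted least squares against the final levels.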
  7297. int n_not_ongrid = 0;
  7298. for (int k = 0; k < 4; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  7299. if (n_not_ongrid > 0 && scale > 0) {
  7300. float id = 1/scale;
  7301. for (int k = 0; k < 4; ++k) {
  7302. if (is_on_grid[k]) continue;
  7303. uint16_t u = 0;
  7304. for (int i = 0; i < 8; ++i) {
  7305. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  7306. l = MAX(0, MIN(kMaxQ-1, l));
  7307. u |= (l << 2*i);
  7308. }
  7309. int grid_index = kmap_q2xs[u];
  7310. if (grid_index < 0) {
  7311. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  7312. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  7313. }
  7314. const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
  7315. for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
  7316. }
  7317. float sumqx = 0, sumq2 = 0;
  7318. for (int i = 0; i < 32; ++i) {
  7319. float w = weight[i];
  7320. float q = 2*L[i] + 1;
  7321. sumqx += w*xval[i]*q;
  7322. sumq2 += w*q*q;
  7323. }
  7324. if (sumq2 > 0) scale = sumqx/sumq2;
  7325. }
  7326. if (scale < 0) {
  7327. // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
  7328. // and correspondingly flip quant signs.
  7329. scale = -scale;
  7330. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  7331. }
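// Pack the block: four 8-bit grid indices go into q2[2*ib], and four 7-bit sign masks into the low 28 bits
// of q2[2*ib+1] (the 4-bit block scale is added to the top 4 bits later).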
  7332. for (int k = 0; k < 4; ++k) {
  7333. uint16_t u = 0;
  7334. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  7335. int grid_index = kmap_q2xs[u];
  7336. if (grid_index < 0) {
  7337. printf("Oops: found point %u not on grid:", u);
  7338. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  7339. printf("\n");
  7340. GGML_ASSERT(false);
  7341. }
  7342. q2[2*ib+0] |= (grid_index << 8*k);
  7343. q2[2*ib+1] |= (block_signs[k] << 7*k);
  7344. }
  7345. GGML_ASSERT(scale >= 0);
  7346. scales[ib] = scale;
  7347. max_scale = MAX(max_scale, scale);
  7348. }
  7349. if (!max_scale) {
  7350. memset(y[ibl].qs, 0, QK_K/4);
  7351. continue;
  7352. }
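// Encode the per-block scales as 4-bit values relative to d = max_scale/31; while doing so, this version
// also re-searches the full 256-point grid with the final per-block scale and refits d against the chosen
// grid points.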
  7353. float d = max_scale/31;
  7354. y[ibl].d = GGML_FP32_TO_FP16(d);
  7355. float id = 1/d;
  7356. float sumqx = 0, sumq2 = 0;
  7357. for (int ib = 0; ib < QK_K/32; ++ib) {
  7358. int l = nearest_int(0.5f*(id*scales[ib]-1));
  7359. l = MAX(0, MIN(15, l));
  7360. q2[2*ib+1] |= ((uint32_t)l << 28);
  7361. const float * xb = xbl + 32*ib;
  7362. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  7363. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  7364. const uint8_t * aux8 = (const uint8_t *)(q2 + 2*ib);
  7365. const float db = d * (1 + 2*l);
  7366. uint32_t u = 0;
  7367. for (int k = 0; k < 4; ++k) {
  7368. const int8_t * signs = keven_signs_q2xs + 8*((q2[2*ib+1] >> 7*k) & 127);
  7369. const float * xk = xb + 8*k;
  7370. const float * wk = weight + 8*k;
  7371. const uint8_t * grid = (const uint8_t *)(kgrid_q2xs + aux8[k]);
  7372. float best_mse = 0; int best_index = aux8[k];
  7373. for (int j = 0; j < 8; ++j) {
  7374. float diff = db * grid[j] * signs[j] - xk[j];
  7375. best_mse += wk[j] * diff * diff;
  7376. }
  7377. for (int idx = 0; idx < 256; ++idx) {
  7378. grid = (const uint8_t *)(kgrid_q2xs + idx);
  7379. float mse = 0;
  7380. for (int j = 0; j < 8; ++j) {
  7381. float diff = db * grid[j] * signs[j] - xk[j];
  7382. mse += wk[j] * diff * diff;
  7383. }
  7384. if (mse < best_mse) {
  7385. best_mse = mse; best_index = idx;
  7386. }
  7387. }
  7388. u |= (best_index << 8*k);
  7389. grid = (const uint8_t *)(kgrid_q2xs + best_index);
  7390. //grid = (const uint8_t *)(kgrid_q2xs + aux8[k]);
  7391. for (int j = 0; j < 8; ++j) {
  7392. float q = db * grid[j] * signs[j];
  7393. sumqx += wk[j] * q * xk[j];
  7394. sumq2 += wk[j] * q * q;
  7395. }
  7396. }
  7397. q2[2*ib] = u;
  7398. if (sumq2 > 0) y[ibl].d = GGML_FP32_TO_FP16(d*sumqx/sumq2);
  7399. }
  7400. memcpy(y[ibl].qs, q2, QK_K/4);
  7401. }
  7402. }
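// Same scheme as iq2_xxs, but with the 512-point grid, a 4-bit scale per group of 16, and, for each group
// of 8, a 9-bit grid index packed together with its 7 sign bits into one uint16_t.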
  7403. static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  7404. const int gindex = iq2_data_index(512);
  7405. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  7406. const int * kmap_q2xs = iq2_data[gindex].map;
  7407. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  7408. GGML_ASSERT(quant_weights && "missing quantization weights");
  7409. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  7410. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  7411. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  7412. GGML_ASSERT(n%QK_K == 0);
  7413. const int kMaxQ = 3;
  7414. const int nbl = n/256;
  7415. block_iq2_xs * y = vy;
  7416. float scales[QK_K/16];
  7417. float weight[16];
  7418. float xval[16];
  7419. int8_t L[16];
  7420. int8_t Laux[16];
  7421. float waux[16];
  7422. bool is_on_grid[2];
  7423. bool is_on_grid_aux[2];
  7424. uint8_t block_signs[2];
  7425. uint16_t q2[2*(QK_K/16)];
  7426. for (int ibl = 0; ibl < nbl; ++ibl) {
  7427. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  7428. memset(q2, 0, QK_K/4);
  7429. memset(y[ibl].scales, 0, QK_K/32);
  7430. float max_scale = 0;
  7431. const float * xbl = x + QK_K*ibl;
  7432. float sumx2 = 0;
  7433. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  7434. float sigma2 = sumx2/QK_K;
  7435. for (int ib = 0; ib < QK_K/16; ++ib) {
  7436. const float * xb = xbl + 16*ib;
  7437. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  7438. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  7439. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  7440. for (int k = 0; k < 2; ++k) {
  7441. int nflip = 0;
  7442. uint8_t s = 0;
  7443. for (int i = 0; i < 8; ++i) {
  7444. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  7445. else {
  7446. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  7447. }
  7448. }
  7449. if (nflip%2) {
  7450. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  7451. for (int i = 1; i < 8; ++i) {
  7452. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  7453. if (ax < min) {
  7454. min = ax; imin = i;
  7455. }
  7456. }
  7457. xval[8*k+imin] = -xval[8*k+imin];
  7458. s ^= (1 << imin);
  7459. }
  7460. block_signs[k] = s & 127;
  7461. }
  7462. float max = xval[0];
  7463. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  7464. if (!max) {
  7465. scales[ib] = 0;
  7466. memset(L, 0, 16);
  7467. continue;
  7468. }
  7469. float best = 0;
  7470. float scale = max/(2*kMaxQ-1);
  7471. is_on_grid[0] = is_on_grid[1] = true;
  7472. for (int is = -9; is <= 9; ++is) {
  7473. float id = (2*kMaxQ-1+is*0.1f)/max;
  7474. float this_scale = 1/id;
  7475. for (int k = 0; k < 2; ++k) {
  7476. for (int i = 0; i < 8; ++i) {
  7477. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  7478. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  7479. }
  7480. uint16_t u = 0;
  7481. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  7482. int grid_index = kmap_q2xs[u];
  7483. is_on_grid_aux[k] = true;
  7484. if (grid_index < 0) {
  7485. is_on_grid_aux[k] = false;
  7486. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  7487. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  7488. }
  7489. }
  7490. float sumqx = 0, sumq2 = 0;
  7491. for (int i = 0; i < 16; ++i) {
  7492. float w = weight[i];
  7493. float q = 2*Laux[i] + 1;
  7494. sumqx += w*xval[i]*q;
  7495. sumq2 += w*q*q;
  7496. }
  7497. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  7498. scale = sumqx/sumq2; best = scale*sumqx;
  7499. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  7500. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  7501. }
  7502. }
  7503. int n_not_ongrid = 0;
  7504. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  7505. if (n_not_ongrid > 0 && scale > 0) {
  7506. float id = 1/scale;
  7507. for (int k = 0; k < 2; ++k) {
  7508. if (is_on_grid[k]) continue;
  7509. uint16_t u = 0;
  7510. for (int i = 0; i < 8; ++i) {
  7511. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  7512. l = MAX(0, MIN(kMaxQ-1, l));
  7513. u |= (l << 2*i);
  7514. L[8*k + i] = l;
  7515. }
  7516. int grid_index = kmap_q2xs[u];
  7517. if (grid_index < 0) {
  7518. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  7519. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  7520. }
  7521. }
  7522. float sumqx = 0, sumq2 = 0;
  7523. for (int i = 0; i < 16; ++i) {
  7524. float w = weight[i];
  7525. float q = 2*L[i] + 1;
  7526. sumqx += w*xval[i]*q;
  7527. sumq2 += w*q*q;
  7528. }
  7529. if (sumq2 > 0) scale = sumqx/sumq2;
  7530. }
  7531. if (scale < 0) {
  7532. scale = -scale;
  7533. for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
  7534. }
  7535. for (int k = 0; k < 2; ++k) {
  7536. uint16_t u = 0;
  7537. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  7538. int grid_index = kmap_q2xs[u];
  7539. if (grid_index < 0) {
  7540. printf("Oops: found point %u not on grid:", u);
  7541. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  7542. printf("\n");
  7543. GGML_ASSERT(false);
  7544. }
  7545. q2[2*ib+k] = grid_index | (block_signs[k] << 9);
  7546. }
  7547. GGML_ASSERT(scale >= 0);
  7548. scales[ib] = scale;
  7549. max_scale = MAX(max_scale, scale);
  7550. }
  7551. if (!max_scale) {
  7552. memset(y[ibl].qs, 0, QK_K/4);
  7553. continue;
  7554. }
  7555. float d = max_scale/31;
  7556. y[ibl].d = GGML_FP32_TO_FP16(d);
  7557. float id = 1/d;
  7558. for (int ib = 0; ib < QK_K/16; ++ib) {
  7559. int l = nearest_int(0.5f*(id*scales[ib]-1));
  7560. l = MAX(0, MIN(15, l));
  7561. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  7562. else y[ibl].scales[ib/2] |= (l << 4);
  7563. }
  7564. memcpy(y[ibl].qs, q2, QK_K/4);
  7565. }
  7566. }
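// Row-wise driver: quantizes nrow rows of n_per_row values each; the hist argument is unused.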
  7567. size_t quantize_iq2_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  7568. (void)hist;
  7569. GGML_ASSERT(n_per_row%QK_K == 0);
  7570. int nblock = n_per_row/QK_K;
  7571. char * qrow = (char *)dst;
  7572. for (int row = 0; row < nrow; ++row) {
  7573. quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
  7574. src += n_per_row;
  7575. qrow += nblock*sizeof(block_iq2_xxs);
  7576. }
  7577. return nrow * nblock * sizeof(block_iq2_xxs);
  7578. }
  7579. size_t quantize_iq2_xs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  7580. (void)hist;
  7581. GGML_ASSERT(n_per_row%QK_K == 0);
  7582. int nblock = n_per_row/QK_K;
  7583. char * qrow = (char *)dst;
  7584. for (int row = 0; row < nrow; ++row) {
  7585. quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
  7586. src += n_per_row;
  7587. qrow += nblock*sizeof(block_iq2_xs);
  7588. }
  7589. return nrow * nblock * sizeof(block_iq2_xs);
  7590. }
  7591. //
  7592. // ============================================= 3-bit using D4 lattice
  7593. //
  7594. typedef struct {
  7595. uint32_t * grid;
  7596. int * map;
  7597. uint16_t * neighbours;
  7598. } iq3_entry_t;
  7599. static iq3_entry_t iq3_data[1] = {
  7600. {NULL, NULL, NULL},
  7601. };
  7602. static inline int iq3_data_index(int grid_size) {
  7603. (void)grid_size;
  7604. GGML_ASSERT(grid_size == 256);
  7605. return 0;
  7606. }
  7607. static int iq3_compare_func(const void * left, const void * right) {
  7608. const int * l = (const int *)left;
  7609. const int * r = (const int *)right;
  7610. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  7611. }
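// Mirrors iq2xs_init_impl above: build the expanded grid, the index -> grid-position map, and the
// neighbour table for the 256-point, 4-dimensional grid (each 3-bit field l becomes 2*l + 1).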
  7612. void iq3xs_init_impl(int grid_size) {
  7613. const int gindex = iq3_data_index(grid_size);
  7614. if (iq3_data[gindex].grid) {
  7615. return;
  7616. }
  7617. static const uint16_t kgrid_256[256] = {
  7618. 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74,
  7619. 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159,
  7620. 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321,
  7621. 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531,
  7622. 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664,
  7623. 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978,
  7624. 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105,
  7625. 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228,
  7626. 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553,
  7627. 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722,
  7628. 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063,
  7629. 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389,
  7630. 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746,
  7631. 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153,
  7632. 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610,
  7633. 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992,
  7634. };
  7635. const int kmap_size = 4096;
  7636. const int nwant = 2;
  7637. const uint16_t * kgrid = kgrid_256;
  7638. uint32_t * kgrid_q3xs;
  7639. int * kmap_q3xs;
  7640. uint16_t * kneighbors_q3xs;
  7641. printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
  7642. uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
  7643. for (int k = 0; k < grid_size; ++k) {
  7644. int8_t * pos = (int8_t *)(the_grid + k);
  7645. for (int i = 0; i < 4; ++i) {
  7646. int l = (kgrid[k] >> 3*i) & 0x7;
  7647. pos[i] = 2*l + 1;
  7648. }
  7649. }
  7650. kgrid_q3xs = the_grid;
  7651. iq3_data[gindex].grid = the_grid;
  7652. kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
  7653. iq3_data[gindex].map = kmap_q3xs;
  7654. for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
  7655. uint32_t aux32;
  7656. uint8_t * aux8 = (uint8_t *)&aux32;
  7657. for (int i = 0; i < grid_size; ++i) {
  7658. aux32 = kgrid_q3xs[i];
  7659. uint16_t index = 0;
  7660. for (int k=0; k<4; ++k) {
  7661. uint16_t q = (aux8[k] - 1)/2;
  7662. index |= (q << 3*k);
  7663. }
  7664. kmap_q3xs[index] = i;
  7665. }
  7666. int8_t pos[4];
  7667. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  7668. int num_neighbors = 0, num_not_in_map = 0;
  7669. for (int i = 0; i < kmap_size; ++i) {
  7670. if (kmap_q3xs[i] >= 0) continue;
  7671. ++num_not_in_map;
  7672. for (int k = 0; k < 4; ++k) {
  7673. int l = (i >> 3*k) & 0x7;
  7674. pos[k] = 2*l + 1;
  7675. }
  7676. for (int j = 0; j < grid_size; ++j) {
  7677. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  7678. int d2 = 0;
  7679. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  7680. dist2[2*j+0] = d2;
  7681. dist2[2*j+1] = j;
  7682. }
  7683. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  7684. int n = 0; int d2 = dist2[0];
  7685. int nhave = 1;
  7686. for (int j = 0; j < grid_size; ++j) {
  7687. if (dist2[2*j] > d2) {
  7688. if (nhave == nwant) break;
  7689. d2 = dist2[2*j];
  7690. ++nhave;
  7691. }
  7692. ++n;
  7693. }
  7694. num_neighbors += n;
  7695. }
  7696. printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  7697. kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  7698. iq3_data[gindex].neighbours = kneighbors_q3xs;
  7699. int counter = 0;
  7700. for (int i = 0; i < kmap_size; ++i) {
  7701. if (kmap_q3xs[i] >= 0) continue;
  7702. for (int k = 0; k < 4; ++k) {
  7703. int l = (i >> 3*k) & 0x7;
  7704. pos[k] = 2*l + 1;
  7705. }
  7706. for (int j = 0; j < grid_size; ++j) {
  7707. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  7708. int d2 = 0;
  7709. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  7710. dist2[2*j+0] = d2;
  7711. dist2[2*j+1] = j;
  7712. }
  7713. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  7714. kmap_q3xs[i] = -(counter + 1);
  7715. int d2 = dist2[0];
  7716. uint16_t * start = &kneighbors_q3xs[counter++];
  7717. int n = 0, nhave = 1;
  7718. for (int j = 0; j < grid_size; ++j) {
  7719. if (dist2[2*j] > d2) {
  7720. if (nhave == nwant) break;
  7721. d2 = dist2[2*j];
  7722. ++nhave;
  7723. }
  7724. kneighbors_q3xs[counter++] = dist2[2*j+1];
  7725. ++n;
  7726. }
  7727. *start = n;
  7728. }
  7729. free(dist2);
  7730. }
  7731. void iq3xs_free_impl(int grid_size) {
  7732. GGML_ASSERT(grid_size == 256);
  7733. const int gindex = iq3_data_index(grid_size);
  7734. if (iq3_data[gindex].grid) {
  7735. free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL;
  7736. free(iq3_data[gindex].map); iq3_data[gindex].map = NULL;
  7737. free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
  7738. }
  7739. }
  7740. static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
  7741. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  7742. int num_neighbors = neighbours[0];
  7743. GGML_ASSERT(num_neighbors > 0);
  7744. float best_d2 = FLT_MAX;
  7745. int grid_index = -1;
  7746. for (int j = 1; j <= num_neighbors; ++j) {
  7747. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  7748. float d2 = 0;
  7749. for (int i = 0; i < 4; ++i) {
  7750. float q = pg[i];
  7751. float diff = scale*q - xval[i];
  7752. d2 += weight[i]*diff*diff;
  7753. }
  7754. if (d2 < best_d2) {
  7755. best_d2 = d2; grid_index = neighbours[j];
  7756. }
  7757. }
  7758. GGML_ASSERT(grid_index >= 0);
  7759. const int8_t * pg = (const int8_t *)(grid + grid_index);
  7760. for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
  7761. return grid_index;
  7762. }
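// Quantize one row to iq3_xxs: each group of 4 values is matched against the 256-point grid above
// (3-bit levels per coordinate); signs are handled per group of 8 as in the iq2 quantizers, and
// quant_weights is optional here, falling back to x^2 weights when absent.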
  7763. static void quantize_row_iq3_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  7764. const int gindex = iq3_data_index(256);
  7765. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  7766. const int * kmap_q3xs = iq3_data[gindex].map;
  7767. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  7768. //GGML_ASSERT(quant_weights && "missing quantization weights");
  7769. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  7770. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  7771. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  7772. GGML_ASSERT(n%QK_K == 0);
  7773. const int kMaxQ = 8;
  7774. const int nbl = n/256;
  7775. block_iq3_xxs * y = vy;
  7776. float scales[QK_K/32];
  7777. float weight[32];
  7778. float xval[32];
  7779. int8_t L[32];
  7780. int8_t Laux[32];
  7781. float waux[32];
  7782. bool is_on_grid[8];
  7783. bool is_on_grid_aux[8];
  7784. uint8_t block_signs[8];
  7785. uint8_t q3[3*(QK_K/8)];
  7786. uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);
  7787. for (int ibl = 0; ibl < nbl; ++ibl) {
  7788. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  7789. memset(q3, 0, 3*QK_K/8);
  7790. float max_scale = 0;
  7791. const float * xbl = x + QK_K*ibl;
  7792. float sumx2 = 0;
  7793. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  7794. float sigma2 = sumx2/QK_K;
  7795. for (int ib = 0; ib < QK_K/32; ++ib) {
  7796. const float * xb = xbl + 32*ib;
  7797. if (quant_weights) {
  7798. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  7799. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  7800. } else {
  7801. for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
  7802. }
  7803. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
  7804. for (int k = 0; k < 4; ++k) {
  7805. int nflip = 0;
  7806. uint8_t s = 0;
  7807. for (int i = 0; i < 8; ++i) {
  7808. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  7809. else {
  7810. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  7811. }
  7812. }
  7813. if (nflip%2) {
  7814. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  7815. for (int i = 1; i < 8; ++i) {
  7816. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  7817. if (ax < min) {
  7818. min = ax; imin = i;
  7819. }
  7820. }
  7821. xval[8*k+imin] = -xval[8*k+imin];
  7822. s ^= (1 << imin);
  7823. }
  7824. block_signs[k] = s & 127;
  7825. }
  7826. float max = xval[0];
  7827. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  7828. if (!max) {
  7829. scales[ib] = 0;
  7830. memset(L, 0, 32);
  7831. continue;
  7832. }
7833. float best = 0;
7834. float scale = max/(2*kMaxQ-1);
for (int k = 0; k < 8; ++k) is_on_grid[k] = true; // initialize before the search; otherwise the flags can be read uninitialized when no candidate scale improves on `best`
  7835. for (int is = -15; is <= 15; ++is) {
  7836. float id = (2*kMaxQ-1+is*0.2f)/max;
  7837. float this_scale = 1/id;
  7838. for (int k = 0; k < 8; ++k) {
  7839. for (int i = 0; i < 4; ++i) {
  7840. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  7841. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  7842. }
  7843. uint16_t u = 0;
  7844. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  7845. int grid_index = kmap_q3xs[u];
  7846. is_on_grid_aux[k] = true;
  7847. if (grid_index < 0) {
  7848. is_on_grid_aux[k] = false;
  7849. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  7850. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  7851. }
  7852. }
  7853. float sumqx = 0, sumq2 = 0;
  7854. for (int i = 0; i < 32; ++i) {
  7855. float w = weight[i];
  7856. float q = 2*Laux[i] + 1;
  7857. sumqx += w*xval[i]*q;
  7858. sumq2 += w*q*q;
  7859. }
  7860. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  7861. scale = sumqx/sumq2; best = scale*sumqx;
  7862. for (int i = 0; i < 32; ++i) L[i] = Laux[i];
  7863. for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
  7864. }
  7865. }
  7866. int n_not_ongrid = 0;
  7867. for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  7868. if (n_not_ongrid > 0 && scale > 0) {
  7869. float id = 1/scale;
  7870. for (int k = 0; k < 8; ++k) {
  7871. if (is_on_grid[k]) continue;
  7872. uint16_t u = 0;
  7873. for (int i = 0; i < 4; ++i) {
  7874. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  7875. l = MAX(0, MIN(kMaxQ-1, l));
  7876. u |= (l << 3*i);
  7877. }
  7878. int grid_index = kmap_q3xs[u];
  7879. if (grid_index < 0) {
  7880. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  7881. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  7882. }
  7883. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  7884. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  7885. }
  7886. float sumqx = 0, sumq2 = 0;
  7887. for (int i = 0; i < 32; ++i) {
  7888. float w = weight[i];
  7889. float q = 2*L[i] + 1;
  7890. sumqx += w*xval[i]*q;
  7891. sumq2 += w*q*q;
  7892. }
  7893. if (sumq2 > 0) scale = sumqx/sumq2;
  7894. }
  7895. if (scale < 0) {
  7896. // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
  7897. // and correspondingly flip quant signs.
  7898. scale = -scale;
  7899. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  7900. }
  7901. for (int k = 0; k < 8; ++k) {
  7902. uint16_t u = 0;
  7903. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  7904. int grid_index = kmap_q3xs[u];
  7905. if (grid_index < 0) {
  7906. printf("Oops: found point %u not on grid:", u);
  7907. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  7908. printf("\n");
  7909. GGML_ASSERT(false);
  7910. }
  7911. q3[8*ib+k] = grid_index;
  7912. }
  7913. scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
  7914. GGML_ASSERT(scale >= 0);
  7915. scales[ib] = scale;
  7916. max_scale = MAX(max_scale, scale);
  7917. }
  7918. if (!max_scale) {
  7919. memset(y[ibl].qs, 0, 3*QK_K/8);
  7920. continue;
  7921. }
  7922. float d = max_scale/31;
  7923. y[ibl].d = GGML_FP32_TO_FP16(d);
  7924. float id = 1/d;
  7925. float sumqx = 0, sumq2 = 0;
  7926. for (int ib = 0; ib < QK_K/32; ++ib) {
  7927. int l = nearest_int(0.5f*(id*scales[ib]-1));
  7928. l = MAX(0, MIN(15, l));
  7929. scales_and_signs[ib] |= ((uint32_t)l << 28);
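// Note: the refinement pass below is compiled out (if (false)); it appears to be kept only for experimentation.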
  7930. if (false) {
  7931. const float * xb = xbl + 32*ib;
  7932. if (quant_weights) {
  7933. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  7934. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  7935. } else {
  7936. for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
  7937. }
  7938. const float db = 0.25f * d * (1 + 2*l);
  7939. for (int k = 0; k < 8; ++k) {
  7940. const int8_t * signs = keven_signs_q2xs + 8*((scales_and_signs[ib] >> 7*(k/2)) & 127) + 4*(k%2);
  7941. const float * xk = xb + 4*k;
  7942. const float * wk = weight + 4*k;
  7943. //const uint8_t * grid = (const uint8_t *)(kgrid_q3xs + q3[8*ib+k]);
  7944. const uint8_t * grid = (const uint8_t *)(iq3xxs_grid + q3[8*ib+k]);
  7945. float best_mse = 0; int best_index = q3[8*ib+k];
  7946. for (int j = 0; j < 4; ++j) {
  7947. float diff = db * grid[j] * signs[j] - xk[j];
  7948. best_mse += wk[j] * diff * diff;
  7949. }
  7950. for (int idx = 0; idx < 256; ++idx) {
  7951. //grid = (const uint8_t *)(kgrid_q3xs + idx);
  7952. grid = (const uint8_t *)(iq3xxs_grid + idx);
  7953. float mse = 0;
  7954. for (int j = 0; j < 4; ++j) {
  7955. float diff = db * grid[j] * signs[j] - xk[j];
  7956. mse += wk[j] * diff * diff;
  7957. }
  7958. if (mse < best_mse) {
  7959. best_mse = mse; best_index = idx;
  7960. }
  7961. }
  7962. q3[8*ib+k] = best_index;
  7963. //grid = (const uint8_t *)(kgrid_q3xs + best_index);
  7964. grid = (const uint8_t *)(iq3xxs_grid + best_index);
  7965. for (int j = 0; j < 4; ++j) {
  7966. float q = db * grid[j] * signs[j];
  7967. sumqx += wk[j] * q * xk[j];
  7968. sumq2 += wk[j] * q * q;
  7969. }
  7970. }
  7971. if (sumq2 > 0) y[ibl].d = GGML_FP32_TO_FP16(d*sumqx/sumq2);
  7972. }
  7973. }
  7974. memcpy(y[ibl].qs, q3, 3*QK_K/8);
  7975. }
  7976. }
  7977. size_t quantize_iq3_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  7978. (void)hist;
  7979. GGML_ASSERT(n_per_row%QK_K == 0);
  7980. int nblock = n_per_row/QK_K;
  7981. char * qrow = (char *)dst;
  7982. for (int row = 0; row < nrow; ++row) {
  7983. quantize_row_iq3_xxs_impl(src, qrow, n_per_row, quant_weights);
  7984. src += n_per_row;
  7985. qrow += nblock*sizeof(block_iq3_xxs);
  7986. }
  7987. return nrow * nblock * sizeof(block_iq3_xxs);
  7988. }
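// Convenience wrappers used when no importance matrix is available (quant_weights == NULL).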
  7989. void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int k) {
  7990. assert(k % QK_K == 0);
  7991. block_iq3_xxs * restrict y = vy;
  7992. quantize_row_iq3_xxs_reference(x, y, k);
  7993. }
  7994. void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int k) {
  7995. assert(k % QK_K == 0);
  7996. quantize_row_iq3_xxs_impl(x, y, k, NULL);
  7997. }