- #include "ggml-quants.h"
- #include "ggml-impl.h"
- #include <math.h>
- #include <string.h>
- #include <assert.h>
- #include <float.h>
- #include <stdlib.h> // for qsort
- #include <stdio.h> // for GGML_ASSERT
- #ifdef __ARM_NEON
- // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
- //
- // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
- //
- #include <arm_neon.h>
- #else
- #ifdef __wasm_simd128__
- #include <wasm_simd128.h>
- #else
- #if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
- #include <altivec.h>
- #undef bool
- #define bool _Bool
- #else
- #if defined(_MSC_VER) || defined(__MINGW32__)
- #include <intrin.h>
- #else
- #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
- #if !defined(__riscv)
- #include <immintrin.h>
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
- #ifdef __riscv_v_intrinsic
- #include <riscv_vector.h>
- #endif
- #undef MIN
- #undef MAX
- #define MIN(a, b) ((a) < (b) ? (a) : (b))
- #define MAX(a, b) ((a) > (b) ? (a) : (b))
- #define UNUSED GGML_UNUSED
- #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
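- // MM256_SET_M128I(a, b) assembles a 256-bit vector with a as the high 128 bits
- // and b as the low 128 bits; it stands in for _mm256_set_m128i, which some
- // older compilers do not provide.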
- #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
- // multiply int8_t, add results pairwise twice
- static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
- // Get absolute values of x vectors
- const __m128i ax = _mm_sign_epi8(x, x);
- // Sign the values of the y vectors
- const __m128i sy = _mm_sign_epi8(y, x);
- // Perform multiplication and create 16-bit values
- const __m128i dot = _mm_maddubs_epi16(ax, sy);
- const __m128i ones = _mm_set1_epi16(1);
- return _mm_madd_epi16(ones, dot);
- }
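- // Why the sign shuffling above: _mm_maddubs_epi16 multiplies *unsigned* bytes
- // by *signed* bytes. Using |x| as the unsigned operand and copying x's sign
- // onto y leaves each product equal to x*y (e.g. (-3)*5 becomes 3*(-5) = -15),
- // and the final _mm_madd_epi16 against ones sums adjacent 16-bit products
- // into four int32 lanes.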
- #if __AVX__ || __AVX2__ || __AVX512F__
- // horizontally add 8 floats
- static inline float hsum_float_8(const __m256 x) {
- __m128 res = _mm256_extractf128_ps(x, 1);
- res = _mm_add_ps(res, _mm256_castps256_ps128(x));
- res = _mm_add_ps(res, _mm_movehl_ps(res, res));
- res = _mm_add_ss(res, _mm_movehdup_ps(res));
- return _mm_cvtss_f32(res);
- }
- // horizontally add 8 int32_t
- static inline int hsum_i32_8(const __m256i a) {
- const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
- const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
- const __m128i sum64 = _mm_add_epi32(hi64, sum128);
- const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
- return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
- }
- // horizontally add 4 int32_t
- static inline int hsum_i32_4(const __m128i a) {
- const __m128i hi64 = _mm_unpackhi_epi64(a, a);
- const __m128i sum64 = _mm_add_epi32(hi64, a);
- const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
- return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
- }
- #if defined(__AVX2__) || defined(__AVX512F__)
- // spread 32 bits to 32 bytes { 0x00, 0xFF }
- static inline __m256i bytes_from_bits_32(const uint8_t * x) {
- uint32_t x32;
- memcpy(&x32, x, sizeof(uint32_t));
- const __m256i shuf_mask = _mm256_set_epi64x(
- 0x0303030303030303, 0x0202020202020202,
- 0x0101010101010101, 0x0000000000000000);
- __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
- const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
- bytes = _mm256_or_si256(bytes, bit_mask);
- return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
- }
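- // How this works: the shuffle copies source byte b into destination bytes
- // 8*b .. 8*b+7, and bit_mask leaves exactly one bit clear per byte lane
- // (bit k in lane 8*b+k). After the OR, a lane is all-ones iff its tested bit
- // was set, so the compare against -1 expands each input bit to 0xFF or 0x00.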
- // Unpack 32 4-bit fields into 32 bytes
- // The output vector contains 32 bytes, each one in the [0 .. 15] interval
- static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
- {
- const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
- const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
- const __m256i lowMask = _mm256_set1_epi8( 0xF );
- return _mm256_and_si256(lowMask, bytes);
- }
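- // Output layout: result byte j (j < 16) holds the low nibble of input byte j,
- // and result byte j+16 holds its high nibble. _mm_srli_epi16 shifts bits
- // across byte boundaries within each 16-bit lane, but the final AND with 0xF
- // discards whatever bleeds over.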
- // add int16_t pairwise and return as float vector
- static inline __m256 sum_i16_pairs_float(const __m256i x) {
- const __m256i ones = _mm256_set1_epi16(1);
- const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
- return _mm256_cvtepi32_ps(summed_pairs);
- }
- static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
- #if __AVXVNNI__
- const __m256i zero = _mm256_setzero_si256();
- const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
- return _mm256_cvtepi32_ps(summed_pairs);
- #else
- // Perform multiplication and create 16-bit values
- const __m256i dot = _mm256_maddubs_epi16(ax, sy);
- return sum_i16_pairs_float(dot);
- #endif
- }
- // multiply int8_t, add results pairwise twice and return as float vector
- static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
- #if __AVXVNNIINT8__
- const __m256i zero = _mm256_setzero_si256();
- const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
- return _mm256_cvtepi32_ps(summed_pairs);
- #else
- // Get absolute values of x vectors
- const __m256i ax = _mm256_sign_epi8(x, x);
- // Sign the values of the y vectors
- const __m256i sy = _mm256_sign_epi8(y, x);
- return mul_sum_us8_pairs_float(ax, sy);
- #endif
- }
- static inline __m128i packNibbles( __m256i bytes )
- {
- // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
- #if __AVX512F__
- const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
- bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
- return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
- #else
- const __m256i lowByte = _mm256_set1_epi16( 0xFF );
- __m256i high = _mm256_andnot_si256( lowByte, bytes );
- __m256i low = _mm256_and_si256( lowByte, bytes );
- high = _mm256_srli_epi16( high, 4 );
- bytes = _mm256_or_si256( low, high );
- // Compress uint16_t lanes into bytes
- __m128i r0 = _mm256_castsi256_si128( bytes );
- __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
- return _mm_packus_epi16( r0, r1 );
- #endif
- }
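- // In the fallback path each 16-bit lane enters as 0000_abcd_0000_efgh; the
- // masked shift folds abcd down next to efgh, and _mm_packus_epi16 then
- // narrows the 16-bit lanes of the two 128-bit halves into packed bytes.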
- #elif defined(__AVX__)
- // spread 32 bits to 32 bytes { 0x00, 0xFF }
- static inline __m256i bytes_from_bits_32(const uint8_t * x) {
- uint32_t x32;
- memcpy(&x32, x, sizeof(uint32_t));
- const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
- const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
- __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
- __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
- const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
- bytesl = _mm_or_si128(bytesl, bit_mask);
- bytesh = _mm_or_si128(bytesh, bit_mask);
- bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
- bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
- return MM256_SET_M128I(bytesh, bytesl);
- }
- // Unpack 32 4-bit fields into 32 bytes
- // The output vector contains 32 bytes, each one in the [0 .. 15] interval
- static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
- {
- // Load 16 bytes from memory
- __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
- __m128i tmph = _mm_srli_epi16(tmpl, 4);
- const __m128i lowMask = _mm_set1_epi8(0xF);
- tmpl = _mm_and_si128(lowMask, tmpl);
- tmph = _mm_and_si128(lowMask, tmph);
- return MM256_SET_M128I(tmph, tmpl);
- }
- // add int16_t pairwise and return as float vector
- static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
- const __m128i ones = _mm_set1_epi16(1);
- const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
- const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
- const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
- return _mm256_cvtepi32_ps(summed_pairs);
- }
- static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
- const __m128i axl = _mm256_castsi256_si128(ax);
- const __m128i axh = _mm256_extractf128_si256(ax, 1);
- const __m128i syl = _mm256_castsi256_si128(sy);
- const __m128i syh = _mm256_extractf128_si256(sy, 1);
- // Perform multiplication and create 16-bit values
- const __m128i dotl = _mm_maddubs_epi16(axl, syl);
- const __m128i doth = _mm_maddubs_epi16(axh, syh);
- return sum_i16_pairs_float(doth, dotl);
- }
- // multiply int8_t, add results pairwise twice and return as float vector
- static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
- const __m128i xl = _mm256_castsi256_si128(x);
- const __m128i xh = _mm256_extractf128_si256(x, 1);
- const __m128i yl = _mm256_castsi256_si128(y);
- const __m128i yh = _mm256_extractf128_si256(y, 1);
- // Get absolute values of x vectors
- const __m128i axl = _mm_sign_epi8(xl, xl);
- const __m128i axh = _mm_sign_epi8(xh, xh);
- // Sign the values of the y vectors
- const __m128i syl = _mm_sign_epi8(yl, xl);
- const __m128i syh = _mm_sign_epi8(yh, xh);
- // Perform multiplication and create 16-bit values
- const __m128i dotl = _mm_maddubs_epi16(axl, syl);
- const __m128i doth = _mm_maddubs_epi16(axh, syh);
- return sum_i16_pairs_float(doth, dotl);
- }
- static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
- {
- // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
- const __m128i lowByte = _mm_set1_epi16( 0xFF );
- __m128i high = _mm_andnot_si128( lowByte, bytes1 );
- __m128i low = _mm_and_si128( lowByte, bytes1 );
- high = _mm_srli_epi16( high, 4 );
- bytes1 = _mm_or_si128( low, high );
- high = _mm_andnot_si128( lowByte, bytes2 );
- low = _mm_and_si128( lowByte, bytes2 );
- high = _mm_srli_epi16( high, 4 );
- bytes2 = _mm_or_si128( low, high );
- return _mm_packus_epi16( bytes1, bytes2);
- }
- #endif
- #elif defined(__SSSE3__)
- // horizontally add 4x4 floats
- static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
- __m128 res_0 =_mm_hadd_ps(a, b);
- __m128 res_1 =_mm_hadd_ps(c, d);
- __m128 res =_mm_hadd_ps(res_0, res_1);
- res =_mm_hadd_ps(res, res);
- res =_mm_hadd_ps(res, res);
- return _mm_cvtss_f32(res);
- }
- #endif // __AVX__ || __AVX2__ || __AVX512F__
- #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
- #if defined(__ARM_NEON)
- #ifdef _MSC_VER
- #define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }
- #else
- #define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }
- #endif
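- // On MSVC the NEON vector types cannot be brace-initialized lane by lane,
- // presumably because of how the compiler defines them internally, so the
- // macro packs each pair of 32-bit values into one 64-bit initializer instead.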
- #if !defined(__aarch64__)
- // fallbacks for 32-bit ARM, where the following AArch64-only intrinsics are unavailable:
- // vaddvq_s16
- // vpaddq_s16
- // vpaddq_s32
- // vaddvq_s32
- // vaddvq_f32
- // vmaxvq_f32
- // vcvtnq_s32_f32
- // vzip1_u8
- // vzip2_u8
- inline static int32_t vaddvq_s16(int16x8_t v) {
- return
- (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
- (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
- (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
- (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
- }
- inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
- int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
- int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
- return vcombine_s16(a0, b0);
- }
- inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
- int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
- int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
- return vcombine_s32(a0, b0);
- }
- inline static int32_t vaddvq_s32(int32x4_t v) {
- return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
- }
- inline static float vaddvq_f32(float32x4_t v) {
- return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
- }
- inline static float vmaxvq_f32(float32x4_t v) {
- return
- MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
- MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
- }
- inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
- int32x4_t res;
- res[0] = roundf(vgetq_lane_f32(v, 0));
- res[1] = roundf(vgetq_lane_f32(v, 1));
- res[2] = roundf(vgetq_lane_f32(v, 2));
- res[3] = roundf(vgetq_lane_f32(v, 3));
- return res;
- }
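- // Caveat: the native vcvtnq_s32_f32 rounds ties to even, while roundf rounds
- // ties away from zero, so this fallback can differ from the AArch64
- // instruction on exact .5 inputs.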
- inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
- uint8x8_t res;
- res[0] = a[0]; res[1] = b[0];
- res[2] = a[1]; res[3] = b[1];
- res[4] = a[2]; res[5] = b[2];
- res[6] = a[3]; res[7] = b[3];
- return res;
- }
- inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
- uint8x8_t res;
- res[0] = a[4]; res[1] = b[4];
- res[2] = a[5]; res[3] = b[5];
- res[4] = a[6]; res[5] = b[6];
- res[6] = a[7]; res[7] = b[7];
- return res;
- }
- // vld1q_s16_x2
- // vld1q_u8_x2
- // vld1q_u8_x4
- // vld1q_s8_x2
- // vld1q_s8_x4
- // TODO: double-check these work correctly
- typedef struct ggml_int16x8x2_t {
- int16x8_t val[2];
- } ggml_int16x8x2_t;
- inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
- ggml_int16x8x2_t res;
- res.val[0] = vld1q_s16(ptr + 0);
- res.val[1] = vld1q_s16(ptr + 8);
- return res;
- }
- typedef struct ggml_uint8x16x2_t {
- uint8x16_t val[2];
- } ggml_uint8x16x2_t;
- inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
- ggml_uint8x16x2_t res;
- res.val[0] = vld1q_u8(ptr + 0);
- res.val[1] = vld1q_u8(ptr + 16);
- return res;
- }
- typedef struct ggml_uint8x16x4_t {
- uint8x16_t val[4];
- } ggml_uint8x16x4_t;
- inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
- ggml_uint8x16x4_t res;
- res.val[0] = vld1q_u8(ptr + 0);
- res.val[1] = vld1q_u8(ptr + 16);
- res.val[2] = vld1q_u8(ptr + 32);
- res.val[3] = vld1q_u8(ptr + 48);
- return res;
- }
- typedef struct ggml_int8x16x2_t {
- int8x16_t val[2];
- } ggml_int8x16x2_t;
- inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
- ggml_int8x16x2_t res;
- res.val[0] = vld1q_s8(ptr + 0);
- res.val[1] = vld1q_s8(ptr + 16);
- return res;
- }
- typedef struct ggml_int8x16x4_t {
- int8x16_t val[4];
- } ggml_int8x16x4_t;
- inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
- ggml_int8x16x4_t res;
- res.val[0] = vld1q_s8(ptr + 0);
- res.val[1] = vld1q_s8(ptr + 16);
- res.val[2] = vld1q_s8(ptr + 32);
- res.val[3] = vld1q_s8(ptr + 48);
- return res;
- }
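- // The ggml_vld1q_*_x2/_x4 wrappers above emulate the multi-register NEON
- // loads (vld1q_s16_x2 etc.) with plain vld1q_* calls, for toolchains where
- // the _x2/_x4 intrinsics are unavailable; on AArch64 the native intrinsics
- // are used directly (see below).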
- #else
- #define ggml_int16x8x2_t int16x8x2_t
- #define ggml_uint8x16x2_t uint8x16x2_t
- #define ggml_uint8x16x4_t uint8x16x4_t
- #define ggml_int8x16x2_t int8x16x2_t
- #define ggml_int8x16x4_t int8x16x4_t
- #define ggml_vld1q_s16_x2 vld1q_s16_x2
- #define ggml_vld1q_u8_x2 vld1q_u8_x2
- #define ggml_vld1q_u8_x4 vld1q_u8_x4
- #define ggml_vld1q_s8_x2 vld1q_s8_x2
- #define ggml_vld1q_s8_x4 vld1q_s8_x4
- #endif
- #if !defined(__ARM_FEATURE_DOTPROD)
- inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
- const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
- const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));
- return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
- }
- #else
- #define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)
- #endif
- #endif
- #if defined(__ARM_NEON) || defined(__wasm_simd128__)
- #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
- #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
- #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
- #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
- #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
- #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
- #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
- #define B8(c,s ) B7(c,s, c), B7(c,s, s)
- // precomputed tables for expanding 8 bits to 8 bytes:
- static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
- static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
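- // Derivation of the tables above: hex pair k (counting from the low end) of
- // entry i is selected by bit k of i, so each entry expands the 8 index bits
- // into 8 bytes. For example (values as plain uint64_t constants):
- //   table_b2b_0[0x05] == 0x0000000000100010  // bits 0,2 set -> bytes 0,2 = 0x10
- //   table_b2b_1[0x05] == 0x1010101010001000  // the complementary pattern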
- #endif
- // reference implementation for deterministic creation of model files
- void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
- static const int qk = QK4_0;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- float max = 0.0f;
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
- if (amax < fabsf(v)) {
- amax = fabsf(v);
- max = v;
- }
- }
- const float d = max / -8;
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = x[i*qk + 0 + j]*id;
- const float x1 = x[i*qk + qk/2 + j]*id;
- const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
- const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
- y[i].qs[j] = xi0;
- y[i].qs[j] |= xi1 << 4;
- }
- }
- }
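- // Worked example of the scheme above: if the value of largest magnitude in a
- // block is max = -2.0f, then d = -2.0f/-8 = 0.25f and id = 4.0f. That value
- // quantizes to (int8_t)(-2.0f*4.0f + 8.5f) = 0 and dequantizes back to
- // (0 - 8)*0.25f = -2.0f; an input of 0.0f maps to (int8_t)(8.5f) = 8, the
- // zero point of the [0, 15] range.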
- void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
- quantize_row_q4_0_reference(x, y, k);
- }
- void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
- const int qk = QK4_1;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- float min = FLT_MAX;
- float max = -FLT_MAX;
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
- if (v < min) min = v;
- if (v > max) max = v;
- }
- const float d = (max - min) / ((1 << 4) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- y[i].m = GGML_FP32_TO_FP16(min);
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = (x[i*qk + 0 + j] - min)*id;
- const float x1 = (x[i*qk + qk/2 + j] - min)*id;
- const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
- const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
- y[i].qs[j] = xi0;
- y[i].qs[j] |= xi1 << 4;
- }
- }
- }
- void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
- quantize_row_q4_1_reference(x, y, k);
- }
- void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
- static const int qk = QK5_0;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- float max = 0.0f;
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
- if (amax < fabsf(v)) {
- amax = fabsf(v);
- max = v;
- }
- }
- const float d = max / -16;
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- uint32_t qh = 0;
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = x[i*qk + 0 + j]*id;
- const float x1 = x[i*qk + qk/2 + j]*id;
- const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
- const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
- y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
- // get the 5th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
- }
- memcpy(&y[i].qh, &qh, sizeof(qh));
- }
- }
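- // High-bit layout used above: qh is a 32-bit word in which bit j holds the
- // 5th bit of quant j of the first half and bit (j + qk/2) that of quant j of
- // the second half. E.g. a first-half quant of 17 (0b10001) stores 0b0001 in
- // the low nibble of qs[j] and sets bit j of qh.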
- void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
- quantize_row_q5_0_reference(x, y, k);
- }
- void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
- const int qk = QK5_1;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- float min = FLT_MAX;
- float max = -FLT_MAX;
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
- if (v < min) min = v;
- if (v > max) max = v;
- }
- const float d = (max - min) / ((1 << 5) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- y[i].m = GGML_FP32_TO_FP16(min);
- uint32_t qh = 0;
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = (x[i*qk + 0 + j] - min)*id;
- const float x1 = (x[i*qk + qk/2 + j] - min)*id;
- const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
- const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
- y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
- // get the 5th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
- }
- memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
- }
- }
- void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
- quantize_row_q5_1_reference(x, y, k);
- }
- // reference implementation for deterministic creation of model files
- void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- for (int j = 0; j < QK8_0; j++) {
- const float v = x[i*QK8_0 + j];
- amax = MAX(amax, fabsf(v));
- }
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- for (int j = 0; j < QK8_0; ++j) {
- const float x0 = x[i*QK8_0 + j]*id;
- y[i].qs[j] = roundf(x0);
- }
- }
- }
- void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
- assert(QK8_0 == 32);
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
- block_q8_0 * restrict y = vy;
- #if defined(__ARM_NEON)
- for (int i = 0; i < nb; i++) {
- float32x4_t srcv [8];
- float32x4_t asrcv[8];
- float32x4_t amaxv[8];
- for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
- for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
- const float amax = vmaxvq_f32(amaxv[0]);
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- for (int j = 0; j < 8; j++) {
- const float32x4_t v = vmulq_n_f32(srcv[j], id);
- const int32x4_t vi = vcvtnq_s32_f32(v);
- y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
- }
- }
- #elif defined(__wasm_simd128__)
- for (int i = 0; i < nb; i++) {
- v128_t srcv [8];
- v128_t asrcv[8];
- v128_t amaxv[8];
- for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
- for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
- const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
- wasm_f32x4_extract_lane(amaxv[0], 1)),
- MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
- wasm_f32x4_extract_lane(amaxv[0], 3)));
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- for (int j = 0; j < 8; j++) {
- const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
- const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
- y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
- y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
- y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
- y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
- }
- }
- #elif defined(__AVX2__) || defined(__AVX__)
- for (int i = 0; i < nb; i++) {
- // Load elements into 4 AVX vectors
- __m256 v0 = _mm256_loadu_ps( x );
- __m256 v1 = _mm256_loadu_ps( x + 8 );
- __m256 v2 = _mm256_loadu_ps( x + 16 );
- __m256 v3 = _mm256_loadu_ps( x + 24 );
- x += 32;
- // Compute max(abs(e)) for the block
- const __m256 signBit = _mm256_set1_ps( -0.0f );
- __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
- max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
- max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
- const float maxScalar = _mm_cvtss_f32( max4 );
- // Quantize these floats
- const float d = maxScalar / 127.f;
- y[i].d = GGML_FP32_TO_FP16(d);
- const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
- const __m256 mul = _mm256_set1_ps( id );
- // Apply the multiplier
- v0 = _mm256_mul_ps( v0, mul );
- v1 = _mm256_mul_ps( v1, mul );
- v2 = _mm256_mul_ps( v2, mul );
- v3 = _mm256_mul_ps( v3, mul );
- // Round to nearest integer
- v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
- v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
- v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
- v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
- // Convert floats to integers
- __m256i i0 = _mm256_cvtps_epi32( v0 );
- __m256i i1 = _mm256_cvtps_epi32( v1 );
- __m256i i2 = _mm256_cvtps_epi32( v2 );
- __m256i i3 = _mm256_cvtps_epi32( v3 );
- #if defined(__AVX2__)
- // Convert int32 to int16
- i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
- i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
- // Convert int16 to int8
- i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
- // We now have our signed bytes, but their order is wrong: these AVX2 pack
- // instructions process the two 16-byte lanes independently.
- // The following permute fixes the order.
- const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
- i0 = _mm256_permutevar8x32_epi32( i0, perm );
- _mm256_storeu_si256((__m256i *)y[i].qs, i0);
- #else
- // AVX lacks some of the necessary integer instructions,
- // so we split the registers in half and use the SSE equivalents
- __m128i ni0 = _mm256_castsi256_si128( i0 );
- __m128i ni1 = _mm256_extractf128_si256( i0, 1);
- __m128i ni2 = _mm256_castsi256_si128( i1 );
- __m128i ni3 = _mm256_extractf128_si256( i1, 1);
- __m128i ni4 = _mm256_castsi256_si128( i2 );
- __m128i ni5 = _mm256_extractf128_si256( i2, 1);
- __m128i ni6 = _mm256_castsi256_si128( i3 );
- __m128i ni7 = _mm256_extractf128_si256( i3, 1);
- // Convert int32 to int16
- ni0 = _mm_packs_epi32( ni0, ni1 );
- ni2 = _mm_packs_epi32( ni2, ni3 );
- ni4 = _mm_packs_epi32( ni4, ni5 );
- ni6 = _mm_packs_epi32( ni6, ni7 );
- // Convert int16 to int8
- ni0 = _mm_packs_epi16( ni0, ni2 );
- ni4 = _mm_packs_epi16( ni4, ni6 );
- _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
- _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
- #endif
- }
- #elif defined(__riscv_v_intrinsic)
- size_t vl = __riscv_vsetvl_e32m4(QK8_0);
- for (int i = 0; i < nb; i++) {
- // load elements
- vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
- vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
- vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
- vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
- float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = GGML_FP32_TO_FP16(d);
- vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
- // convert to integer
- vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
- vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
- // store result
- __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
- }
- #else
- GGML_UNUSED(nb);
- // scalar
- quantize_row_q8_0_reference(x, y, k);
- #endif
- }
- // reference implementation for deterministic creation of model files
- void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
- assert(QK8_1 == 32);
- assert(k % QK8_1 == 0);
- const int nb = k / QK8_1;
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- for (int j = 0; j < QK8_1; j++) {
- const float v = x[i*QK8_1 + j];
- amax = MAX(amax, fabsf(v));
- }
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = d;
- int sum = 0;
- for (int j = 0; j < QK8_1/2; ++j) {
- const float v0 = x[i*QK8_1 + j]*id;
- const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
- y[i].qs[ j] = roundf(v0);
- y[i].qs[QK8_1/2 + j] = roundf(v1);
- sum += y[i].qs[ j];
- sum += y[i].qs[QK8_1/2 + j];
- }
- y[i].s = sum*d;
- }
- }
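- // Unlike q8_0, q8_1 also stores s = d * sum(q). When such a block is dotted
- // with a q4_1/q5_1 block, the per-block minimum m contributes m * sum(q8) to
- // the result, so a dot-product kernel can presumably fold the whole min term
- // in with a single multiply by this precomputed value.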
- void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK8_1 == 0);
- const int nb = k / QK8_1;
- block_q8_1 * restrict y = vy;
- #if defined(__ARM_NEON)
- for (int i = 0; i < nb; i++) {
- float32x4_t srcv [8];
- float32x4_t asrcv[8];
- float32x4_t amaxv[8];
- for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
- for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
- const float amax = vmaxvq_f32(amaxv[0]);
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = d;
- int32x4_t accv = vdupq_n_s32(0);
- for (int j = 0; j < 8; j++) {
- const float32x4_t v = vmulq_n_f32(srcv[j], id);
- const int32x4_t vi = vcvtnq_s32_f32(v);
- y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
- accv = vaddq_s32(accv, vi);
- }
- y[i].s = d * vaddvq_s32(accv);
- }
- #elif defined(__wasm_simd128__)
- for (int i = 0; i < nb; i++) {
- v128_t srcv [8];
- v128_t asrcv[8];
- v128_t amaxv[8];
- for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
- for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
- const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
- wasm_f32x4_extract_lane(amaxv[0], 1)),
- MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
- wasm_f32x4_extract_lane(amaxv[0], 3)));
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = d;
- v128_t accv = wasm_i32x4_splat(0);
- for (int j = 0; j < 8; j++) {
- const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
- const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
- y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
- y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
- y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
- y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
- accv = wasm_i32x4_add(accv, vi);
- }
- y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
- wasm_i32x4_extract_lane(accv, 1) +
- wasm_i32x4_extract_lane(accv, 2) +
- wasm_i32x4_extract_lane(accv, 3));
- }
- #elif defined(__AVX2__) || defined(__AVX__)
- for (int i = 0; i < nb; i++) {
- // Load elements into 4 AVX vectors
- __m256 v0 = _mm256_loadu_ps( x );
- __m256 v1 = _mm256_loadu_ps( x + 8 );
- __m256 v2 = _mm256_loadu_ps( x + 16 );
- __m256 v3 = _mm256_loadu_ps( x + 24 );
- x += 32;
- // Compute max(abs(e)) for the block
- const __m256 signBit = _mm256_set1_ps( -0.0f );
- __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
- max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
- max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
- const float maxScalar = _mm_cvtss_f32( max4 );
- // Quantize these floats
- const float d = maxScalar / 127.f;
- y[i].d = d;
- const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
- const __m256 mul = _mm256_set1_ps( id );
- // Apply the multiplier
- v0 = _mm256_mul_ps( v0, mul );
- v1 = _mm256_mul_ps( v1, mul );
- v2 = _mm256_mul_ps( v2, mul );
- v3 = _mm256_mul_ps( v3, mul );
- // Round to nearest integer
- v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
- v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
- v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
- v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
- // Convert floats to integers
- __m256i i0 = _mm256_cvtps_epi32( v0 );
- __m256i i1 = _mm256_cvtps_epi32( v1 );
- __m256i i2 = _mm256_cvtps_epi32( v2 );
- __m256i i3 = _mm256_cvtps_epi32( v3 );
- #if defined(__AVX2__)
- // Compute the sum of the quants and set y[i].s
- y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
- // Convert int32 to int16
- i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
- i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
- // Convert int16 to int8
- i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
- // We now have our signed bytes, but their order is wrong: these AVX2 pack
- // instructions process the two 16-byte lanes independently.
- // The following permute fixes the order.
- const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
- i0 = _mm256_permutevar8x32_epi32( i0, perm );
- _mm256_storeu_si256((__m256i *)y[i].qs, i0);
- #else
- // AVX lacks some of the necessary integer instructions,
- // so we split the registers in half and use the SSE equivalents
- __m128i ni0 = _mm256_castsi256_si128( i0 );
- __m128i ni1 = _mm256_extractf128_si256( i0, 1);
- __m128i ni2 = _mm256_castsi256_si128( i1 );
- __m128i ni3 = _mm256_extractf128_si256( i1, 1);
- __m128i ni4 = _mm256_castsi256_si128( i2 );
- __m128i ni5 = _mm256_extractf128_si256( i2, 1);
- __m128i ni6 = _mm256_castsi256_si128( i3 );
- __m128i ni7 = _mm256_extractf128_si256( i3, 1);
- // Compute the sum of the quants and set y[i].s
- const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
- const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
- y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
- // Convert int32 to int16
- ni0 = _mm_packs_epi32( ni0, ni1 );
- ni2 = _mm_packs_epi32( ni2, ni3 );
- ni4 = _mm_packs_epi32( ni4, ni5 );
- ni6 = _mm_packs_epi32( ni6, ni7 );
- // Convert int16 to int8
- ni0 = _mm_packs_epi16( ni0, ni2 );
- ni4 = _mm_packs_epi16( ni4, ni6 );
- _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
- _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
- #endif
- }
- #elif defined(__riscv_v_intrinsic)
- size_t vl = __riscv_vsetvl_e32m4(QK8_1);
- for (int i = 0; i < nb; i++) {
- // load elements
- vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
- vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
- vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
- vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
- float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
- y[i].d = d;
- vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
- // convert to integer
- vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
- vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
- // store result
- __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
- // compute sum for y[i].s
- vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
- vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
- // set y[i].s
- int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
- y[i].s = sum*d;
- }
- #else
- GGML_UNUSED(nb);
- // scalar
- quantize_row_q8_1_reference(x, y, k);
- #endif
- }
- void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
- static const int qk = QK4_0;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- for (int j = 0; j < qk/2; ++j) {
- const int x0 = (x[i].qs[j] & 0x0F) - 8;
- const int x1 = (x[i].qs[j] >> 4) - 8;
- y[i*qk + j + 0 ] = x0*d;
- y[i*qk + j + qk/2] = x1*d;
- }
- }
- }
- void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
- static const int qk = QK4_1;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float m = GGML_FP16_TO_FP32(x[i].m);
- for (int j = 0; j < qk/2; ++j) {
- const int x0 = (x[i].qs[j] & 0x0F);
- const int x1 = (x[i].qs[j] >> 4);
- y[i*qk + j + 0 ] = x0*d + m;
- y[i*qk + j + qk/2] = x1*d + m;
- }
- }
- }
- void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
- static const int qk = QK5_0;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
- const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
- const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
- y[i*qk + j + 0 ] = x0*d;
- y[i*qk + j + qk/2] = x1*d;
- }
- }
- }
- void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
- static const int qk = QK5_1;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float m = GGML_FP16_TO_FP32(x[i].m);
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
- const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
- const int x1 = (x[i].qs[j] >> 4) | xh_1;
- y[i*qk + j + 0 ] = x0*d + m;
- y[i*qk + j + qk/2] = x1*d + m;
- }
- }
- }
- void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
- static const int qk = QK8_0;
- assert(k % qk == 0);
- const int nb = k / qk;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- for (int j = 0; j < qk; ++j) {
- y[i*qk + j] = x[i].qs[j]*d;
- }
- }
- }
- //
- // 2-6 bit quantization in super-blocks
- //
- //
- // ===================== Helper functions
- //
- static inline int nearest_int(float fval) {
- assert(fval <= 4194303.f);
- float val = fval + 12582912.f;
- int i; memcpy(&i, &val, sizeof(int));
- return (i & 0x007fffff) - 0x00400000;
- }
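- // How nearest_int works: 12582912.0f is 1.5 * 2^23. Adding it to any fval
- // with |fval| < 2^22 lands in a binade where the float spacing is exactly
- // 1.0, so the mantissa ends up holding the rounded integer plus a 0x00400000
- // offset. E.g. nearest_int(-1.4f): -1.4f + 12582912.0f rounds to 12582911.0f,
- // whose low 23 mantissa bits are 0x3FFFFF, and 0x3FFFFF - 0x00400000 == -1.
- // For in-range inputs this should match (int)nearbyintf(fval) under the
- // default round-to-nearest mode.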
- static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
- const float * restrict qw) {
- float max = 0;
- float amax = 0;
- for (int i = 0; i < n; ++i) {
- float ax = fabsf(x[i]);
- if (ax > amax) { amax = ax; max = x[i]; }
- }
- if (amax < 1e-30f) { // all zero
- for (int i = 0; i < n; ++i) {
- L[i] = 0;
- }
- return 0.f;
- }
- float iscale = -nmax / max;
- if (rmse_type == 0) {
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
- }
- return 1/iscale;
- }
- bool return_early = false;
- if (rmse_type < 0) {
- rmse_type = -rmse_type;
- return_early = true;
- }
- float sumlx = 0;
- float suml2 = 0;
- #ifdef HAVE_BUGGY_APPLE_LINKER
- // use 'volatile' to prevent unrolling and work around a bug in Apple ld64 1015.7
- for (volatile int i = 0; i < n; ++i) {
- #else
- for (int i = 0; i < n; ++i) {
- #endif
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- L[i] = l + nmax;
- float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- float scale = sumlx/suml2;
- if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
- float best = scale * sumlx;
- for (int is = -9; is <= 9; ++is) {
- if (is == 0) {
- continue;
- }
- iscale = -(nmax + 0.1f*is) / max;
- sumlx = suml2 = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- if (suml2 > 0 && sumlx*sumlx > best*suml2) {
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
- }
- scale = sumlx/suml2; best = scale*sumlx;
- }
- }
- return scale;
- }
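- // The scale returned above is the weighted least-squares fit: for fixed
- // integer quants l_i, sum(w_i*(x_i - s*l_i)^2) is minimized at
- // s = sum(w_i*x_i*l_i)/sum(w_i*l_i^2) = sumlx/suml2. The is-loop then just
- // retries with slightly perturbed inverse scales and keeps the rounding that
- // maximizes sumlx^2/suml2, i.e. the one with the smaller weighted error.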
- static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
- float max = 0;
- float amax = 0;
- for (int i = 0; i < n; ++i) {
- float ax = fabsf(x[i]);
- if (ax > amax) { amax = ax; max = x[i]; }
- }
- if (!amax) { // all zero
- for (int i = 0; i < n; ++i) { L[i] = 0; }
- return 0.f;
- }
- float iscale = -nmax / max;
- if (do_rmse) {
- float sumlx = 0;
- float suml2 = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- L[i] = l;
- float w = x[i]*x[i];
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- for (int itry = 0; itry < 5; ++itry) {
- int n_changed = 0;
- for (int i = 0; i < n; ++i) {
- float w = x[i]*x[i];
- float slx = sumlx - w*x[i]*L[i];
- if (slx > 0) {
- float sl2 = suml2 - w*L[i]*L[i];
- int new_l = nearest_int(x[i] * sl2 / slx);
- new_l = MAX(-nmax, MIN(nmax-1, new_l));
- if (new_l != L[i]) {
- slx += w*x[i]*new_l;
- sl2 += w*new_l*new_l;
- if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
- L[i] = new_l; sumlx = slx; suml2 = sl2;
- ++n_changed;
- }
- }
- }
- }
- if (!n_changed) {
- break;
- }
- }
- for (int i = 0; i < n; ++i) {
- L[i] += nmax;
- }
- return sumlx / suml2;
- }
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- L[i] = l + nmax;
- }
- return 1/iscale;
- }
- static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
- int ntry, float alpha) {
- float min = x[0];
- float max = x[0];
- for (int i = 1; i < n; ++i) {
- if (x[i] < min) min = x[i];
- if (x[i] > max) max = x[i];
- }
- if (max == min) {
- for (int i = 0; i < n; ++i) L[i] = 0;
- *the_min = 0;
- return 0.f;
- }
- if (min > 0) min = 0;
- float iscale = nmax/(max - min);
- float scale = 1/iscale;
- for (int itry = 0; itry < ntry; ++itry) {
- float sumlx = 0; int suml2 = 0;
- bool did_change = false;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- l = MAX(0, MIN(nmax, l));
- if (l != L[i]) {
- L[i] = l;
- did_change = true;
- }
- sumlx += (x[i] - min)*l;
- suml2 += l*l;
- }
- scale = sumlx/suml2;
- float sum = 0;
- for (int i = 0; i < n; ++i) {
- sum += x[i] - scale*L[i];
- }
- min = alpha*min + (1 - alpha)*sum/n;
- if (min > 0) min = 0;
- iscale = 1/scale;
- if (!did_change) break;
- }
- *the_min = -min;
- return scale;
- }
- static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
- uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
- float rmin, float rdelta, int nstep, bool use_mad) {
- float min = x[0];
- float max = x[0];
- float sum_w = weights[0];
- float sum_x = sum_w * x[0];
- #ifdef HAVE_BUGGY_APPLE_LINKER
- // use 'volatile' to prevent unrolling and work around a bug in Apple ld64 1015.7
- for (volatile int i = 1; i < n; ++i) {
- #else
- for (int i = 1; i < n; ++i) {
- #endif
- if (x[i] < min) min = x[i];
- if (x[i] > max) max = x[i];
- float w = weights[i];
- sum_w += w;
- sum_x += w * x[i];
- }
- if (min > 0) min = 0;
- if (max == min) {
- for (int i = 0; i < n; ++i) L[i] = 0;
- *the_min = -min;
- return 0.f;
- }
- float iscale = nmax/(max - min);
- float scale = 1/iscale;
- float best_mad = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- L[i] = MAX(0, MIN(nmax, l));
- float diff = scale * L[i] + min - x[i];
- diff = use_mad ? fabsf(diff) : diff * diff;
- float w = weights[i];
- best_mad += w * diff;
- }
- if (nstep < 1) {
- *the_min = -min;
- return scale;
- }
- for (int is = 0; is <= nstep; ++is) {
- iscale = (rmin + rdelta*is + nmax)/(max - min);
- float sum_l = 0, sum_l2 = 0, sum_xl = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- l = MAX(0, MIN(nmax, l));
- Laux[i] = l;
- float w = weights[i];
- sum_l += w*l;
- sum_l2 += w*l*l;
- sum_xl += w*l*x[i];
- }
- float D = sum_w * sum_l2 - sum_l * sum_l;
- if (D > 0) {
- float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
- float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
- if (this_min > 0) {
- this_min = 0;
- this_scale = sum_xl / sum_l2;
- }
- float mad = 0;
- for (int i = 0; i < n; ++i) {
- float diff = this_scale * Laux[i] + this_min - x[i];
- diff = use_mad ? fabsf(diff) : diff * diff;
- float w = weights[i];
- mad += w * diff;
- }
- if (mad < best_mad) {
- for (int i = 0; i < n; ++i) {
- L[i] = Laux[i];
- }
- best_mad = mad;
- scale = this_scale;
- min = this_min;
- }
- }
- }
- *the_min = -min;
- return scale;
- }
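- // For each candidate iscale the loop above solves the 2x2 normal equations
- // of the weighted fit x_i ~= scale*l_i + min:
- //   sum_l2*scale + sum_l*min = sum_xl
- //   sum_l *scale + sum_w*min = sum_x
- // via Cramer's rule with determinant D = sum_w*sum_l2 - sum_l^2, keeping the
- // candidate with the smallest weighted error (or MAD). min is clamped to be
- // non-positive so that the stored *the_min = -min is a non-negative offset.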
- #if QK_K == 256
- static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
- if (j < 4) {
- *d = q[j] & 63; *m = q[j + 4] & 63;
- } else {
- *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
- *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
- }
- }
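- // The 12 scale bytes of a super-block hold eight 6-bit scales and eight
- // 6-bit mins. For j < 4 these are simply the low 6 bits of q[j] and q[j+4];
- // for j >= 4 the low 4 bits sit in q[j+4] (scale in the low nibble, min in
- // the high nibble) and the two high bits are stashed in the top bits of
- // q[j-4] and q[j], respectively.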
- #endif
- //========================= 2-bit (de)-quantization
- void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- uint8_t L[QK_K];
- uint8_t Laux[16];
- float weights[16];
- float mins[QK_K/16];
- float scales[QK_K/16];
- const float q4scale = 15.f;
- for (int i = 0; i < nb; i++) {
- float max_scale = 0; // since we subtract the min, scales are always positive
- float max_min = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
- scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
- float scale = scales[j];
- if (scale > max_scale) {
- max_scale = scale;
- }
- float min = mins[j];
- if (min > max_min) {
- max_min = min;
- }
- }
- if (max_scale > 0) {
- float iscale = q4scale/max_scale;
- for (int j = 0; j < QK_K/16; ++j) {
- int l = nearest_int(iscale*scales[j]);
- y[i].scales[j] = l;
- }
- y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
- } else {
- for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
- y[i].d = GGML_FP32_TO_FP16(0.f);
- }
- if (max_min > 0) {
- float iscale = q4scale/max_min;
- for (int j = 0; j < QK_K/16; ++j) {
- int l = nearest_int(iscale*mins[j]);
- y[i].scales[j] |= (l << 4);
- }
- y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
- } else {
- y[i].dmin = GGML_FP32_TO_FP16(0.f);
- }
- for (int j = 0; j < QK_K/16; ++j) {
- const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
- if (!d) continue;
- const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int((x[16*j + ii] + dm)/d);
- l = MAX(0, MIN(3, l));
- L[16*j + ii] = l;
- }
- }
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
- }
- }
- #else
- for (int l = 0; l < 16; ++l) {
- y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
- }
- #endif
- x += QK_K;
- }
- }
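- // Resulting q2_K layout (QK_K == 256): per super-block, sixteen bytes of
- // packed 4-bit (scale, min) pairs, QK_K/4 bytes of 2-bit quants, and the two
- // fp16 factors d and dmin; each value reconstructs roughly as
- // x ~= d*scale*q - dmin*min.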
- void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float min = GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * q = x[i].qs;
- #if QK_K == 256
- int is = 0;
- float dl, ml;
- for (int n = 0; n < QK_K; n += 128) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- uint8_t sc = x[i].scales[is++];
- dl = d * (sc & 0xF); ml = min * (sc >> 4);
- for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
- sc = x[i].scales[is++];
- dl = d * (sc & 0xF); ml = min * (sc >> 4);
- for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
- shift += 2;
- }
- q += 32;
- }
- #else
- float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
- float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
- float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
- float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
- for (int l = 0; l < 16; ++l) {
- y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
- y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
- y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
- y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
- }
- y += QK_K;
- #endif
- }
- }
- void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
- quantize_row_q2_K_reference(x, vy, k);
- }
- size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- (void)hist; // TODO: collect histograms
- for (int j = 0; j < n; j += k) {
- block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
- quantize_row_q2_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q2_K));
- }
- static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
- uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
- float rmin, float rdelta, int nstep, bool use_mad) {
- float min = x[0];
- float max = x[0];
- float sum_w = weights ? weights[0] : x[0]*x[0];
- float sum_x = sum_w * x[0];
- #ifdef HAVE_BUGGY_APPLE_LINKER
- // use 'volatile' to prevent unrolling and work around a bug in Apple ld64 1015.7
- for (volatile int i = 1; i < n; ++i) {
- #else
- for (int i = 1; i < n; ++i) {
- #endif
- if (x[i] < min) min = x[i];
- if (x[i] > max) max = x[i];
- float w = weights ? weights[i] : x[i]*x[i];
- sum_w += w;
- sum_x += w * x[i];
- }
- if (min > 0) {
- min = 0;
- }
- if (max <= min) {
- memset(L, 0, n);
- *the_min = -min;
- return 0.f;
- }
- float iscale = nmax/(max - min);
- float scale = 1/iscale;
- float best_mad = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- L[i] = MAX(0, MIN(nmax, l));
- float diff = scale * L[i] + min - x[i];
- diff = use_mad ? fabsf(diff) : diff*diff;
- float w = weights ? weights[i] : x[i]*x[i];
- best_mad += w * diff;
- }
- if (nstep < 1) {
- *the_min = -min;
- return scale;
- }
- for (int is = 0; is <= nstep; ++is) {
- iscale = (rmin + rdelta*is + nmax)/(max - min);
- float sum_l = 0, sum_l2 = 0, sum_xl = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- l = MAX(0, MIN(nmax, l));
- Laux[i] = l;
- float w = weights ? weights[i] : x[i]*x[i];
- sum_l += w*l;
- sum_l2 += w*l*l;
- sum_xl += w*l*x[i];
- }
- float D = sum_w * sum_l2 - sum_l * sum_l;
- if (D > 0) {
- float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
- float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
- if (this_min > 0) {
- this_min = 0;
- this_scale = sum_xl / sum_l2;
- }
- float mad = 0;
- for (int i = 0; i < n; ++i) {
- float diff = this_scale * Laux[i] + this_min - x[i];
- diff = use_mad ? fabsf(diff) : diff*diff;
- float w = weights ? weights[i] : x[i]*x[i];
- mad += w * diff;
- }
- if (mad < best_mad) {
- for (int i = 0; i < n; ++i) {
- L[i] = Laux[i];
- }
- best_mad = mad;
- scale = this_scale;
- min = this_min;
- }
- }
- }
- *the_min = -min;
- return scale;
- }
- static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
- float max = 0;
- for (int i = 0; i < n; ++i) {
- max = MAX(max, x[i]);
- }
- if (!max) { // all zero
- for (int i = 0; i < n; ++i) { L[i] = 0; }
- return 0.f;
- }
- float iscale = nmax / max;
- for (int i = 0; i < n; ++i) {
- L[i] = nearest_int(iscale * x[i]);
- }
- float scale = 1/iscale;
- float best_mse = 0;
- for (int i = 0; i < n; ++i) {
- float diff = x[i] - scale*L[i];
- float w = quant_weights[i];
- best_mse += w*diff*diff;
- }
- for (int is = -4; is <= 4; ++is) {
- if (is == 0) continue;
- float iscale_is = (0.1f*is + nmax)/max;
- float scale_is = 1/iscale_is;
- float mse = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale_is*x[i]);
- l = MIN(nmax, l);
- float diff = x[i] - scale_is*l;
- float w = quant_weights[i];
- mse += w*diff*diff;
- }
- if (mse < best_mse) {
- best_mse = mse;
- iscale = iscale_is;
- }
- }
- float sumlx = 0;
- float suml2 = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MIN(nmax, l);
- L[i] = l;
- float w = quant_weights[i];
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- for (int itry = 0; itry < 5; ++itry) {
- int n_changed = 0;
- for (int i = 0; i < n; ++i) {
- float w = quant_weights[i];
- float slx = sumlx - w*x[i]*L[i];
- float sl2 = suml2 - w*L[i]*L[i];
- if (slx > 0 && sl2 > 0) {
- int new_l = nearest_int(x[i] * sl2 / slx);
- new_l = MIN(nmax, new_l);
- if (new_l != L[i]) {
- slx += w*x[i]*new_l;
- sl2 += w*new_l*new_l;
- if (slx*slx*suml2 > sumlx*sumlx*sl2) {
- L[i] = new_l; sumlx = slx; suml2 = sl2;
- ++n_changed;
- }
- }
- }
- }
- if (!n_changed) {
- break;
- }
- }
- return sumlx / suml2;
- }
- static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
- GGML_ASSERT(quant_weights);
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- const bool requantize = true;
- uint8_t L[QK_K];
- uint8_t Laux[16];
- float mins[QK_K/16];
- float scales[QK_K/16];
- float sw[QK_K/16];
- float weight[QK_K/16];
- uint8_t Ls[QK_K/16], Lm[QK_K/16];
- for (int i = 0; i < nb; i++) {
- memset(sw, 0, QK_K/16*sizeof(float));
- float sumx2 = 0;
- for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
- float sigma2 = sumx2/QK_K;
- for (int j = 0; j < QK_K/16; ++j) {
- const float * restrict qw = quant_weights + QK_K * i + 16*j;
- for (int l = 0; l < QK_K/16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
- for (int l = 0; l < QK_K/16; ++l) sw[j] += weight[l];
- scales[j] = make_qkx3_quants(QK_K/16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
- }
- float dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
- float mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw);
- y[i].d = GGML_FP32_TO_FP16(dm);
- y[i].dmin = GGML_FP32_TO_FP16(mm);
- dm = GGML_FP16_TO_FP32(y[i].d);
- mm = GGML_FP16_TO_FP32(y[i].dmin);
- for (int j = 0; j < QK_K/16; ++j) {
- y[i].scales[j] = Ls[j] | (Lm[j] << 4);
- }
- if (requantize) {
- for (int j = 0; j < QK_K/16; ++j) {
- const float d = dm * (y[i].scales[j] & 0xF);
- if (!d) continue;
- const float m = mm * (y[i].scales[j] >> 4);
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int((x[16*j + ii] + m)/d);
- l = MAX(0, MIN(3, l));
- L[16*j + ii] = l;
- }
- }
- }
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
- }
- }
- #else
- for (int l = 0; l < 16; ++l) {
- y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
- }
- #endif
- x += QK_K;
- }
- }
- size_t quantize_q2_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
- if (!quant_weights) {
- quantize_row_q2_K_reference(src, dst, nrow*n_per_row);
- }
- else {
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- }
- return nrow * row_size;
- }
- //========================= 3-bit (de)-quantization
- void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- int8_t L[QK_K];
- float scales[QK_K / 16];
- for (int i = 0; i < nb; i++) {
- float max_scale = 0;
- float amax = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
- float scale = fabsf(scales[j]);
- if (scale > amax) {
- amax = scale; max_scale = scales[j];
- }
- }
- #if QK_K == 256
- memset(y[i].scales, 0, 12);
- if (max_scale) {
- float iscale = -32.f/max_scale;
- for (int j = 0; j < QK_K/16; ++j) {
- int8_t l = nearest_int(iscale*scales[j]);
- l = MAX(-32, MIN(31, l)) + 32;
- if (j < 8) {
- y[i].scales[j] = l & 0xF;
- } else {
- y[i].scales[j-8] |= ((l & 0xF) << 4);
- }
- l >>= 4;
- y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
- }
- y[i].d = GGML_FP32_TO_FP16(1/iscale);
- } else {
- y[i].d = GGML_FP32_TO_FP16(0.f);
- }
- int8_t sc;
- for (int j = 0; j < QK_K/16; ++j) {
- sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
- sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
- float d = GGML_FP16_TO_FP32(y[i].d) * sc;
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-4, MIN(3, l));
- L[16*j + ii] = l + 4;
- }
- }
- #else
- if (max_scale) {
- float iscale = -8.f/max_scale;
- for (int j = 0; j < QK_K/16; j+=2) {
- int l1 = nearest_int(iscale*scales[j]);
- l1 = 8 + MAX(-8, MIN(7, l1));
- int l2 = nearest_int(iscale*scales[j+1]);
- l2 = 8 + MAX(-8, MIN(7, l2));
- y[i].scales[j/2] = l1 | (l2 << 4);
- }
- y[i].d = GGML_FP32_TO_FP16(1/iscale);
- } else {
- for (int j = 0; j < QK_K/16; j+=2) {
- y[i].scales[j/2] = 0;
- }
- y[i].d = GGML_FP32_TO_FP16(0.f);
- }
- for (int j = 0; j < QK_K/16; ++j) {
- int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
- float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-4, MIN(3, l));
- L[16*j + ii] = l + 4;
- }
- }
- #endif
- memset(y[i].hmask, 0, QK_K/8);
- // The high bit of the first 8 quants goes into bit 0, that of the next 8 into bit 1, etc.
- int m = 0;
- uint8_t hm = 1;
- for (int j = 0; j < QK_K; ++j) {
- if (L[j] > 3) {
- y[i].hmask[m] |= hm;
- L[j] -= 4;
- }
- if (++m == QK_K/8) {
- m = 0; hm <<= 1;
- }
- }
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
- }
- }
- #else
- for (int l = 0; l < 16; ++l) {
- y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
- }
- #endif
- x += QK_K;
- }
- }
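- // Resulting q3_K layout (QK_K == 256): per super-block, QK_K/8 bytes of
- // high-bit masks (hmask), QK_K/4 bytes of 2-bit low quants (qs), 12 bytes of
- // packed 6-bit scales, and the fp16 factor d; each value reconstructs
- // roughly as x ~= d * (scale - 32) * (q - 4), with q the full 3-bit quant.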
- #if QK_K == 256
- void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- const uint32_t kmask1 = 0x03030303;
- const uint32_t kmask2 = 0x0f0f0f0f;
- uint32_t aux[4];
- const int8_t * scales = (const int8_t*)aux;
- for (int i = 0; i < nb; i++) {
- const float d_all = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- uint8_t m = 1;
- memcpy(aux, x[i].scales, 12);
- uint32_t tmp = aux[2];
- aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- int is = 0;
- float dl;
- for (int n = 0; n < QK_K; n += 128) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- dl = d_all * (scales[is++] - 32);
- for (int l = 0; l < 16; ++l) {
- *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
- }
- dl = d_all * (scales[is++] - 32);
- for (int l = 0; l < 16; ++l) {
- *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
- }
- shift += 2;
- m <<= 1;
- }
- q += 32;
- }
- }
- }
- #else
- void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- assert(QK_K == 64);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const float d_all = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
- const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
- const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
- const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
- for (int l=0; l<8; ++l) {
- uint8_t h = hm[l];
- y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
- y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
- y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
- y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
- y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
- y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
- y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
- y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
- }
- y += QK_K;
- }
- }
- #endif
- void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
- quantize_row_q3_K_reference(x, vy, k);
- }
- size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- (void)hist; // TODO: collect histograms
- for (int j = 0; j < n; j += k) {
- block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
- quantize_row_q3_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q3_K));
- }
- static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int n_per_row, const float * restrict quant_weights) {
- #if QK_K != 256
- (void)quant_weights;
- quantize_row_q3_K_reference(x, y, n_per_row);
- #else
- assert(n_per_row % QK_K == 0);
- const int nb = n_per_row / QK_K;
- int8_t L[QK_K];
- float scales[QK_K / 16];
- float weight[16];
- float sw[QK_K / 16];
- int8_t Ls[QK_K / 16];
- for (int i = 0; i < nb; i++) {
- float sumx2 = 0;
- for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
- float sigma2 = 2*sumx2/QK_K;
- for (int j = 0; j < QK_K/16; ++j) {
- if (quant_weights) {
- const float * qw = quant_weights + QK_K * i + 16*j;
- for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
- } else {
- for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
- }
- float sumw = 0;
- for (int l = 0; l < 16; ++l) sumw += weight[l];
- sw[j] = sumw;
- scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
- }
- memset(y[i].scales, 0, 12);
- float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
- for (int j = 0; j < QK_K/16; ++j) {
- int l = Ls[j];
- if (j < 8) {
- y[i].scales[j] = l & 0xF;
- } else {
- y[i].scales[j-8] |= ((l & 0xF) << 4);
- }
- l >>= 4;
- y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
- }
- y[i].d = GGML_FP32_TO_FP16(d_block);
- int8_t sc;
- for (int j = 0; j < QK_K/16; ++j) {
- sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
- sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
- float d = GGML_FP16_TO_FP32(y[i].d) * sc;
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-4, MIN(3, l));
- L[16*j + ii] = l + 4;
- }
- }
- memset(y[i].hmask, 0, QK_K/8);
- // The high bit of the first 8 quants goes into bit 0, that of the next 8 into bit 1, etc.
- int m = 0;
- uint8_t hm = 1;
- for (int j = 0; j < QK_K; ++j) {
- if (L[j] > 3) {
- y[i].hmask[m] |= hm;
- L[j] -= 4;
- }
- if (++m == QK_K/8) {
- m = 0; hm <<= 1;
- }
- }
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
- }
- }
- x += QK_K;
- }
- #endif
- }
- size_t quantize_q3_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
- if (!quant_weights) {
- quantize_row_q3_K_reference(src, dst, nrow*n_per_row);
- }
- else {
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- }
- return nrow * row_size;
- }
- // ====================== 4-bit (de)-quantization
- void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- uint8_t L[QK_K];
- uint8_t Laux[32];
- float weights[32];
- float mins[QK_K/32];
- float scales[QK_K/32];
- for (int i = 0; i < nb; i++) {
- float max_scale = 0; // since we subtract the min, scales are always positive
- float max_min = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
- float sum_x2 = 0;
- for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
- float av_x = sqrtf(sum_x2/32);
- for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
- scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
- float scale = scales[j];
- if (scale > max_scale) {
- max_scale = scale;
- }
- float min = mins[j];
- if (min > max_min) {
- max_min = min;
- }
- }
- #if QK_K == 256
- float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
- float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
- for (int j = 0; j < QK_K/32; ++j) {
- uint8_t ls = nearest_int(inv_scale*scales[j]);
- uint8_t lm = nearest_int(inv_min*mins[j]);
- ls = MIN(63, ls);
- lm = MIN(63, lm);
- if (j < 4) {
- y[i].scales[j] = ls;
- y[i].scales[j+4] = lm;
- } else {
- y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
- y[i].scales[j-4] |= ((ls >> 4) << 6);
- y[i].scales[j-0] |= ((lm >> 4) << 6);
- }
- }
- y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
- y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
- uint8_t sc, m;
- for (int j = 0; j < QK_K/32; ++j) {
- get_scale_min_k4(j, y[i].scales, &sc, &m);
- const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
- if (!d) continue;
- const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + dm)/d);
- l = MAX(0, MIN(15, l));
- L[32*j + ii] = l;
- }
- }
- #else
- const float s_factor = 15.f;
- float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
- float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
- int d1 = nearest_int(inv_scale*scales[0]);
- int m1 = nearest_int(inv_min*mins[0]);
- int d2 = nearest_int(inv_scale*scales[1]);
- int m2 = nearest_int(inv_min*mins[1]);
- y[i].scales[0] = d1 | (m1 << 4);
- y[i].scales[1] = d2 | (m2 << 4);
- y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
- y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
- float sumlx = 0;
- int suml2 = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- const uint8_t sd = y[i].scales[j] & 0xF;
- const uint8_t sm = y[i].scales[j] >> 4;
- const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
- if (!d) continue;
- const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + m)/d);
- l = MAX(0, MIN(15, l));
- L[32*j + ii] = l;
- sumlx += (x[32*j + ii] + m)*l*sd;
- suml2 += l*l*sd*sd;
- }
- }
- if (suml2) {
- y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
- }
- #endif
- uint8_t * q = y[i].qs;
- for (int j = 0; j < QK_K; j += 64) {
- for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
- q += 32;
- }
- x += QK_K;
- }
- }
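- // Worked example for the QK_K == 256 branch above: with max_scale = 3.15f
- // the stored d is 3.15f/63 = 0.05f, and a sub-block scale of 2.0f encodes as
- // ls = nearest_int(63.f/3.15f * 2.0f) = 40, which reconstructs as
- // d*ls = 0.05f*40 = 2.0f (up to fp16 rounding of d).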
- void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const uint8_t * q = x[i].qs;
- #if QK_K == 256
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float min = GGML_FP16_TO_FP32(x[i].dmin);
- int is = 0;
- uint8_t sc, m;
- for (int j = 0; j < QK_K; j += 64) {
- get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
- const float d1 = d * sc; const float m1 = min * m;
- get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
- const float d2 = d * sc; const float m2 = min * m;
- for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
- for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
- q += 32; is += 2;
- }
- #else
- const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
- const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
- const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
- const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
- for (int l = 0; l < 32; ++l) {
- y[l+ 0] = d1 * (q[l] & 0xF) - m1;
- y[l+32] = d2 * (q[l] >> 4) - m2;
- }
- y += QK_K;
- #endif
- }
- }
- void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_q4_K * restrict y = vy;
- quantize_row_q4_K_reference(x, y, k);
- }
- size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- assert(k % QK_K == 0);
- (void)hist; // TODO: collect histograms
- for (int j = 0; j < n; j += k) {
- block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
- quantize_row_q4_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q4_K));
- }
- static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int n_per_row, const float * quant_weights) {
- #if QK_K != 256
- (void)quant_weights;
- quantize_row_q4_K_reference(x, y, n_per_row);
- #else
- assert(n_per_row % QK_K == 0);
- const int nb = n_per_row / QK_K;
- uint8_t L[QK_K];
- uint8_t Laux[32];
- uint8_t Ls[QK_K/32];
- uint8_t Lm[QK_K/32];
- float weights[32];
- float sw[QK_K/32];
- float mins[QK_K/32];
- float scales[QK_K/32];
- for (int i = 0; i < nb; i++) {
- float sum_x2 = 0;
- for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
- float sigma2 = 2*sum_x2/QK_K;
- float av_x = sqrtf(sigma2);
- for (int j = 0; j < QK_K/32; ++j) {
- if (quant_weights) {
- const float * qw = quant_weights + QK_K*i + 32*j;
- for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
- } else {
- for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
- }
- float sumw = 0;
- for (int l = 0; l < 32; ++l) sumw += weights[l];
- sw[j] = sumw;
- scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
- }
- float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
- float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
- for (int j = 0; j < QK_K/32; ++j) {
- uint8_t ls = Ls[j];
- uint8_t lm = Lm[j];
- if (j < 4) {
- y[i].scales[j] = ls;
- y[i].scales[j+4] = lm;
- } else {
- y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
- y[i].scales[j-4] |= ((ls >> 4) << 6);
- y[i].scales[j-0] |= ((lm >> 4) << 6);
- }
- }
- y[i].d = GGML_FP32_TO_FP16(d_block);
- y[i].dmin = GGML_FP32_TO_FP16(m_block);
- uint8_t sc, m;
- for (int j = 0; j < QK_K/32; ++j) {
- get_scale_min_k4(j, y[i].scales, &sc, &m);
- const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
- if (!d) continue;
- const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + dm)/d);
- l = MAX(0, MIN(15, l));
- L[32*j + ii] = l;
- }
- }
- uint8_t * q = y[i].qs;
- for (int j = 0; j < QK_K; j += 64) {
- for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
- q += 32;
- }
- x += QK_K;
- }
- #endif
- }
- size_t quantize_q4_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
- if (!quant_weights) {
- quantize_row_q4_K_reference(src, dst, nrow*n_per_row);
- }
- else {
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- }
- return nrow * row_size;
- }
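- // Note on the q4_K scale packing used above: each 256-value super-block
- // stores 8 six-bit scales and 8 six-bit mins in the 12-byte scales[] array.
- // For j < 4 the 6-bit values go straight into bytes j and j+4; for j >= 4
- // the low 4 bits are packed into byte j+4 and the two high bits are spilled
- // into the top bits of bytes j-4 and j. get_scale_min_k4() reverses exactly
- // this packing on the decode side.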
- // ====================== 5-bit (de)-quantization
- void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- #if QK_K == 256
- uint8_t L[QK_K];
- float mins[QK_K/32];
- float scales[QK_K/32];
- float weights[32];
- uint8_t Laux[32];
- #else
- int8_t L[QK_K];
- float scales[QK_K/16];
- #endif
- for (int i = 0; i < nb; i++) {
- #if QK_K == 256
- float max_scale = 0; // since we subtract the min, scales are always positive
- float max_min = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
- float sum_x2 = 0;
- for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
- float av_x = sqrtf(sum_x2/32);
- for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
- scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
- float scale = scales[j];
- if (scale > max_scale) {
- max_scale = scale;
- }
- float min = mins[j];
- if (min > max_min) {
- max_min = min;
- }
- }
- float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
- float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
- for (int j = 0; j < QK_K/32; ++j) {
- uint8_t ls = nearest_int(inv_scale*scales[j]);
- uint8_t lm = nearest_int(inv_min*mins[j]);
- ls = MIN(63, ls);
- lm = MIN(63, lm);
- if (j < 4) {
- y[i].scales[j] = ls;
- y[i].scales[j+4] = lm;
- } else {
- y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
- y[i].scales[j-4] |= ((ls >> 4) << 6);
- y[i].scales[j-0] |= ((lm >> 4) << 6);
- }
- }
- y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
- y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
- uint8_t sc, m;
- for (int j = 0; j < QK_K/32; ++j) {
- get_scale_min_k4(j, y[i].scales, &sc, &m);
- const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
- if (!d) continue;
- const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + dm)/d);
- l = MAX(0, MIN(31, l));
- L[32*j + ii] = l;
- }
- }
- uint8_t * restrict qh = y[i].qh;
- uint8_t * restrict ql = y[i].qs;
- memset(qh, 0, QK_K/8);
- uint8_t m1 = 1, m2 = 2;
- for (int n = 0; n < QK_K; n += 64) {
- for (int j = 0; j < 32; ++j) {
- int l1 = L[n + j];
- if (l1 > 15) {
- l1 -= 16; qh[j] |= m1;
- }
- int l2 = L[n + j + 32];
- if (l2 > 15) {
- l2 -= 16; qh[j] |= m2;
- }
- ql[j] = l1 | (l2 << 4);
- }
- m1 <<= 2; m2 <<= 2;
- ql += 32;
- }
- #else
- float max_scale = 0, amax = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL);
- float abs_scale = fabsf(scales[j]);
- if (abs_scale > amax) {
- amax = abs_scale;
- max_scale = scales[j];
- }
- }
- float iscale = -128.f/max_scale;
- for (int j = 0; j < QK_K/16; ++j) {
- int l = nearest_int(iscale*scales[j]);
- y[i].scales[j] = MAX(-128, MIN(127, l));
- }
- y[i].d = GGML_FP32_TO_FP16(1/iscale);
- for (int j = 0; j < QK_K/16; ++j) {
- const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
- if (!d) continue;
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-16, MIN(15, l));
- L[16*j + ii] = l + 16;
- }
- }
- uint8_t * restrict qh = y[i].qh;
- uint8_t * restrict ql = y[i].qs;
- memset(qh, 0, QK_K/8);
- for (int j = 0; j < 32; ++j) {
- int jm = j%8;
- int is = j/8;
- int l1 = L[j];
- if (l1 > 15) {
- l1 -= 16; qh[jm] |= (1 << is);
- }
- int l2 = L[j + 32];
- if (l2 > 15) {
- l2 -= 16; qh[jm] |= (1 << (4 + is));
- }
- ql[j] = l1 | (l2 << 4);
- }
- #endif
- x += QK_K;
- }
- }
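- // Note on the q5_K bit packing above: the low 4 bits of each 5-bit value
- // are stored pairwise in qs (values j and j+32 share a byte), while the
- // 5th bits are collected in qh. The masks m1/m2 start at bits 0/1 and are
- // shifted left by 2 after every 64-value chunk, so bit k of qh[j] is the
- // high bit of value j + 32*k.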
- void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const uint8_t * ql = x[i].qs;
- const uint8_t * qh = x[i].qh;
- #if QK_K == 256
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float min = GGML_FP16_TO_FP32(x[i].dmin);
- int is = 0;
- uint8_t sc, m;
- uint8_t u1 = 1, u2 = 2;
- for (int j = 0; j < QK_K; j += 64) {
- get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
- const float d1 = d * sc; const float m1 = min * m;
- get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
- const float d2 = d * sc; const float m2 = min * m;
- for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
- for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
- ql += 32; is += 2;
- u1 <<= 2; u2 <<= 2;
- }
- #else
- float d = GGML_FP16_TO_FP32(x[i].d);
- const int8_t * restrict s = x[i].scales;
- for (int l = 0; l < 8; ++l) {
- y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
- y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
- y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
- y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
- y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
- y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
- y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
- y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
- }
- y += QK_K;
- #endif
- }
- }
- void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_q5_K * restrict y = vy;
- quantize_row_q5_K_reference(x, y, k);
- }
- size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- assert(k % QK_K == 0);
- (void)hist; // TODO: collect histograms
- for (int j = 0; j < n; j += k) {
- block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
- quantize_row_q5_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q5_K));
- }
- static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int n_per_row, const float * quant_weights) {
- #if QK_K != 256
- (void)quant_weights;
- quantize_row_q5_K_reference(x, y, n_per_row);
- #else
- assert(n_per_row % QK_K == 0);
- const int nb = n_per_row / QK_K;
- uint8_t L[QK_K];
- uint8_t Laux[32];
- uint8_t Ls[QK_K/32];
- uint8_t Lm[QK_K/32];
- float mins[QK_K/32];
- float scales[QK_K/32];
- float sw[QK_K/32];
- float weights[32];
- for (int i = 0; i < nb; i++) {
- float sum_x2 = 0;
- for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
- float sigma2 = 2*sum_x2/QK_K;
- float av_x = sqrtf(sigma2);
- for (int j = 0; j < QK_K/32; ++j) {
- if (quant_weights) {
- const float * qw = quant_weights + QK_K*i + 32*j;
- for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
- } else {
- for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
- }
- float sumw = 0;
- for (int l = 0; l < 32; ++l) sumw += weights[l];
- sw[j] = sumw;
- scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
- }
- float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
- float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
- for (int j = 0; j < QK_K/32; ++j) {
- uint8_t ls = Ls[j];
- uint8_t lm = Lm[j];
- ls = MIN(63, ls);
- lm = MIN(63, lm);
- if (j < 4) {
- y[i].scales[j] = ls;
- y[i].scales[j+4] = lm;
- } else {
- y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
- y[i].scales[j-4] |= ((ls >> 4) << 6);
- y[i].scales[j-0] |= ((lm >> 4) << 6);
- }
- }
- y[i].d = GGML_FP32_TO_FP16(d_block);
- y[i].dmin = GGML_FP32_TO_FP16(m_block);
- uint8_t sc, m;
- for (int j = 0; j < QK_K/32; ++j) {
- get_scale_min_k4(j, y[i].scales, &sc, &m);
- const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
- if (!d) continue;
- const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + dm)/d);
- l = MAX(0, MIN(31, l));
- L[32*j + ii] = l;
- }
- }
- uint8_t * restrict qh = y[i].qh;
- uint8_t * restrict ql = y[i].qs;
- memset(qh, 0, QK_K/8);
- uint8_t m1 = 1, m2 = 2;
- for (int n = 0; n < QK_K; n += 64) {
- for (int j = 0; j < 32; ++j) {
- int l1 = L[n + j];
- if (l1 > 15) {
- l1 -= 16; qh[j] |= m1;
- }
- int l2 = L[n + j + 32];
- if (l2 > 15) {
- l2 -= 16; qh[j] |= m2;
- }
- ql[j] = l1 | (l2 << 4);
- }
- m1 <<= 2; m2 <<= 2;
- ql += 32;
- }
- x += QK_K;
- }
- #endif
- }
- size_t quantize_q5_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
- if (!quant_weights) {
- quantize_row_q5_K_reference(src, dst, nrow*n_per_row);
- }
- else {
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- }
- return nrow * row_size;
- }
- // ====================== 6-bit (de)-quantization
- void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- int8_t L[QK_K];
- float scales[QK_K/16];
- for (int i = 0; i < nb; i++) {
- float max_scale = 0;
- float max_abs_scale = 0;
- for (int ib = 0; ib < QK_K/16; ++ib) {
- const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
- scales[ib] = scale;
- const float abs_scale = fabsf(scale);
- if (abs_scale > max_abs_scale) {
- max_abs_scale = abs_scale;
- max_scale = scale;
- }
- }
- if (!max_abs_scale) {
- memset(&y[i], 0, sizeof(block_q6_K));
- y[i].d = GGML_FP32_TO_FP16(0.f);
- x += QK_K;
- continue;
- }
- float iscale = -128.f/max_scale;
- y[i].d = GGML_FP32_TO_FP16(1/iscale);
- for (int ib = 0; ib < QK_K/16; ++ib) {
- y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
- }
- for (int j = 0; j < QK_K/16; ++j) {
- float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-32, MIN(31, l));
- L[16*j + ii] = l + 32;
- }
- }
- uint8_t * restrict ql = y[i].ql;
- uint8_t * restrict qh = y[i].qh;
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- const uint8_t q1 = L[j + l + 0] & 0xF;
- const uint8_t q2 = L[j + l + 32] & 0xF;
- const uint8_t q3 = L[j + l + 64] & 0xF;
- const uint8_t q4 = L[j + l + 96] & 0xF;
- ql[l+ 0] = q1 | (q3 << 4);
- ql[l+32] = q2 | (q4 << 4);
- qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
- }
- ql += 64;
- qh += 32;
- }
- #else
- for (int l = 0; l < 32; ++l) {
- const uint8_t q1 = L[l + 0] & 0xF;
- const uint8_t q2 = L[l + 32] & 0xF;
- ql[l] = q1 | (q2 << 4);
- }
- for (int l = 0; l < 16; ++l) {
- qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
- }
- #endif
- x += QK_K;
- }
- }
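- // Note on the q6_K packing above: every 6-bit value (stored with a +32
- // offset) is split into a low nibble in ql and a 2-bit remainder in qh,
- // four remainders per qh byte. Reconstruction is effectively
- //     q = (low_nibble | (two_high_bits << 4)) - 32
- // which is what dequantize_row_q6_K() below computes.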
- void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict ql = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict sc = x[i].scales;
- #if QK_K == 256
- for (int n = 0; n < QK_K; n += 128) {
- for (int l = 0; l < 32; ++l) {
- int is = l/16;
- const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- y[l + 0] = d * sc[is + 0] * q1;
- y[l + 32] = d * sc[is + 2] * q2;
- y[l + 64] = d * sc[is + 4] * q3;
- y[l + 96] = d * sc[is + 6] * q4;
- }
- y += 128;
- ql += 64;
- qh += 32;
- sc += 8;
- }
- #else
- for (int l = 0; l < 16; ++l) {
- const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- y[l+ 0] = d * sc[0] * q1;
- y[l+16] = d * sc[1] * q2;
- y[l+32] = d * sc[2] * q3;
- y[l+48] = d * sc[3] * q4;
- }
- y += 64;
- #endif
- }
- }
- void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_q6_K * restrict y = vy;
- quantize_row_q6_K_reference(x, y, k);
- }
- size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
- assert(k % QK_K == 0);
- (void)hist; // TODO: collect histograms
- for (int j = 0; j < n; j += k) {
- block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
- quantize_row_q6_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q6_K));
- }
- static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int n_per_row, const float * quant_weights) {
- #if QK_K != 256
- (void)quant_weights;
- quantize_row_q6_K_reference(x, y, n_per_row);
- #else
- assert(n_per_row % QK_K == 0);
- const int nb = n_per_row / QK_K;
- int8_t L[QK_K];
- float scales[QK_K/16];
- //float weights[16];
- for (int i = 0; i < nb; i++) {
- //float sum_x2 = 0;
- //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
- //float sigma2 = sum_x2/QK_K;
- float max_scale = 0;
- float max_abs_scale = 0;
- for (int ib = 0; ib < QK_K/16; ++ib) {
- float scale;
- if (quant_weights) {
- const float * qw = quant_weights + QK_K*i + 16*ib;
- //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
- //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
- scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
- } else {
- scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
- }
- scales[ib] = scale;
- const float abs_scale = fabsf(scale);
- if (abs_scale > max_abs_scale) {
- max_abs_scale = abs_scale;
- max_scale = scale;
- }
- }
- if (!max_abs_scale) {
- memset(&y[i], 0, sizeof(block_q6_K));
- y[i].d = GGML_FP32_TO_FP16(0.f);
- x += QK_K;
- continue;
- }
- float iscale = -128.f/max_scale;
- y[i].d = GGML_FP32_TO_FP16(1/iscale);
- for (int ib = 0; ib < QK_K/16; ++ib) {
- y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
- }
- for (int j = 0; j < QK_K/16; ++j) {
- float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-32, MIN(31, l));
- L[16*j + ii] = l + 32;
- }
- }
- uint8_t * restrict ql = y[i].ql;
- uint8_t * restrict qh = y[i].qh;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- const uint8_t q1 = L[j + l + 0] & 0xF;
- const uint8_t q2 = L[j + l + 32] & 0xF;
- const uint8_t q3 = L[j + l + 64] & 0xF;
- const uint8_t q4 = L[j + l + 96] & 0xF;
- ql[l+ 0] = q1 | (q3 << 4);
- ql[l+32] = q2 | (q4 << 4);
- qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
- }
- ql += 64;
- qh += 32;
- }
- x += QK_K;
- }
- #endif
- }
- size_t quantize_q6_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
- if (!quant_weights) {
- quantize_row_q6_K_reference(src, dst, nrow*n_per_row);
- }
- else {
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- }
- return nrow * row_size;
- }
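- // The *_impl variants below extend the legacy 4/5-bit block formats with an
- // importance-weighted fit: when quant_weights are provided (e.g. derived
- // from an importance matrix), each element's error contribution is weighted by
- //     weight[j] = qw[j] * sqrtf(sigma2 + x[j]*x[j])
- // so high-importance and large-magnitude entries dominate the scale search.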
- static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int n_per_row, const float * quant_weights) {
- static_assert(QK4_0 == 32, "QK4_0 must be 32");
- if (!quant_weights) {
- quantize_row_q4_0_reference(x, y, n_per_row);
- return;
- }
- float weight[QK4_0];
- int8_t L[QK4_0];
- float sum_x2 = 0;
- for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
- float sigma2 = sum_x2/n_per_row;
- const int nb = n_per_row/QK4_0;
- for (int ib = 0; ib < nb; ++ib) {
- const float * xb = x + QK4_0 * ib;
- const float * qw = quant_weights + QK4_0 * ib;
- for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
- float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
- y[ib].d = GGML_FP32_TO_FP16(d);
- for (int j = 0; j < 16; ++j) {
- y[ib].qs[j] = L[j] | (L[j+16] << 4);
- }
- }
- }
- size_t quantize_q4_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- if (!quant_weights) {
- return ggml_quantize_q4_0(src, dst, nrow*n_per_row, n_per_row, hist);
- }
- size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- return nrow * row_size;
- }
- static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int n_per_row, const float * quant_weights) {
- static_assert(QK4_1 == 32, "QK4_1 must be 32");
- if (!quant_weights) {
- quantize_row_q4_1_reference(x, y, n_per_row);
- return;
- }
- float weight[QK4_1];
- uint8_t L[QK4_1], Laux[QK4_1];
- float sum_x2 = 0;
- for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
- float sigma2 = sum_x2/n_per_row;
- const int nb = n_per_row/QK4_1;
- for (int ib = 0; ib < nb; ++ib) {
- const float * xb = x + QK4_1 * ib;
- const float * qw = quant_weights + QK4_1 * ib;
- for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
- float min;
- float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
- y[ib].d = GGML_FP32_TO_FP16(d);
- y[ib].m = GGML_FP32_TO_FP16(-min);
- for (int j = 0; j < 16; ++j) {
- y[ib].qs[j] = L[j] | (L[j+16] << 4);
- }
- }
- }
- size_t quantize_q4_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- if (!quant_weights) {
- return ggml_quantize_q4_1(src, dst, nrow*n_per_row, n_per_row, hist);
- }
- size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- return nrow * row_size;
- }
- static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int n_per_row, const float * quant_weights) {
- static_assert(QK5_0 == 32, "QK5_0 must be 32");
- if (!quant_weights) {
- quantize_row_q5_0_reference(x, y, n_per_row);
- return;
- }
- float weight[QK5_0];
- int8_t L[QK5_0];
- float sum_x2 = 0;
- for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
- float sigma2 = sum_x2/n_per_row;
- const int nb = n_per_row/QK5_0;
- for (int ib = 0; ib < nb; ++ib) {
- const float * xb = x + QK5_0 * ib;
- const float * qw = quant_weights + QK5_0 * ib;
- for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
- float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
- y[ib].d = GGML_FP32_TO_FP16(d);
- uint32_t qh = 0;
- for (int j = 0; j < 16; ++j) {
- const uint8_t xi0 = L[j];
- const uint8_t xi1 = L[j+16];
- y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
- // extract the 5th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
- }
- memcpy(&y[ib].qh, &qh, sizeof(qh));
- }
- }
- size_t quantize_q5_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- if (!quant_weights) {
- return ggml_quantize_q5_0(src, dst, nrow*n_per_row, n_per_row, hist);
- }
- size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- return nrow * row_size;
- }
- static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int n_per_row, const float * quant_weights) {
- static_assert(QK5_1 == 32, "QK5_1 must be 32");
- if (!quant_weights) {
- quantize_row_q5_1_reference(x, y, n_per_row);
- return;
- }
- float weight[QK5_1];
- uint8_t L[QK5_1], Laux[QK5_1];
- float sum_x2 = 0;
- for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
- float sigma2 = sum_x2/n_per_row;
- const int nb = n_per_row/QK5_1;
- for (int ib = 0; ib < nb; ++ib) {
- const float * xb = x + QK5_1 * ib;
- const float * qw = quant_weights + QK5_1 * ib;
- for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
- float min;
- float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
- y[ib].d = GGML_FP32_TO_FP16(d);
- y[ib].m = GGML_FP32_TO_FP16(-min);
- uint32_t qh = 0;
- for (int j = 0; j < 16; ++j) {
- const uint8_t xi0 = L[j];
- const uint8_t xi1 = L[j+16];
- y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
- // extract the 5th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
- }
- memcpy(&y[ib].qh, &qh, sizeof(qh));
- }
- }
- size_t quantize_q5_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- if (!quant_weights) {
- return ggml_quantize_q5_1(src, dst, nrow*n_per_row, n_per_row, hist);
- }
- size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += row_size;
- }
- return nrow * row_size;
- }
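- // A minimal caller sketch for the row-wise quantizers above (illustrative
- // only; `imatrix` stands for an optional importance-weight array and may
- // be NULL):
- //
- //     size_t buf_size = nrow * ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
- //     void * buf = malloc(buf_size);
- //     size_t written = quantize_q5_1(src, buf, nrow, n_per_row, NULL, imatrix);
- //     assert(written == buf_size);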
- // ====================== "True" 2-bit (de)-quantization
- static const uint64_t iq2xxs_grid[256] = {
- 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
- 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808,
- 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819,
- 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819,
- 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b,
- 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808,
- 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08,
- 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b,
- 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819,
- 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08,
- 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808,
- 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08,
- 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808,
- 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808,
- 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919,
- 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819,
- 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08,
- 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908,
- 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819,
- 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808,
- 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808,
- 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908,
- 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808,
- 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08,
- 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819,
- 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819,
- 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819,
- 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908,
- 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19,
- 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819,
- 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b,
- 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808,
- 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908,
- 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08,
- 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08,
- 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908,
- 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819,
- 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808,
- 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808,
- 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19,
- 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819,
- 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919,
- 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b,
- 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08,
- 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808,
- 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908,
- 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b,
- 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819,
- 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08,
- 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08,
- 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808,
- 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b,
- 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b,
- 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908,
- 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819,
- 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808,
- 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908,
- 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b,
- 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808,
- 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b,
- 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b,
- 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808,
- 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19,
- 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908,
- };
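- // Each iq2xxs_grid entry packs the eight byte magnitudes of one 8-value
- // group; only the magnitudes 0x08, 0x19 and 0x2b occur, with signs applied
- // separately via ksigns_iq2xs below. Per the IQ2_XXS design, the 256
- // entries form a fixed codebook of points selected from the E8 lattice.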
- static const uint64_t iq2xs_grid[512] = {
- 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
- 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
- 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
- 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
- 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
- 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808,
- 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819,
- 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819,
- 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808,
- 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b,
- 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b,
- 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908,
- 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908,
- 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919,
- 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808,
- 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919,
- 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908,
- 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b,
- 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908,
- 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08,
- 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808,
- 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808,
- 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819,
- 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908,
- 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819,
- 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808,
- 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b,
- 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819,
- 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819,
- 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808,
- 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908,
- 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19,
- 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b,
- 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b,
- 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919,
- 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808,
- 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819,
- 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819,
- 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b,
- 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908,
- 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808,
- 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819,
- 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808,
- 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919,
- 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808,
- 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808,
- 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908,
- 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908,
- 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808,
- 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b,
- 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819,
- 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919,
- 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908,
- 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808,
- 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908,
- 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919,
- 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08,
- 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19,
- 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b,
- 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b,
- 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808,
- 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08,
- 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b,
- 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908,
- 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b,
- 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908,
- 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08,
- 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808,
- 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808,
- 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08,
- 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819,
- 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919,
- 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808,
- 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808,
- 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819,
- 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819,
- 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908,
- 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908,
- 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b,
- 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908,
- 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908,
- 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908,
- 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808,
- 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819,
- 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819,
- 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819,
- 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808,
- 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b,
- 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819,
- 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819,
- 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08,
- 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808,
- 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19,
- 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919,
- 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808,
- 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19,
- 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b,
- 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808,
- 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b,
- 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b,
- 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08,
- 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b,
- 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808,
- 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819,
- 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808,
- 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808,
- 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08,
- 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b,
- 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19,
- 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08,
- 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919,
- 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08,
- 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08,
- 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908,
- 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908,
- 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b,
- 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908,
- 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808,
- 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b,
- 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808,
- 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808,
- 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19,
- 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08,
- 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808,
- 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b,
- 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808,
- 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b,
- 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b,
- };
- static const uint32_t iq3xxs_grid[256] = {
- 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
- 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
- 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
- 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
- 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
- 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
- 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
- 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
- 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
- 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
- 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
- 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
- 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
- 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
- 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
- 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
- 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
- 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
- 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
- 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
- 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
- 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
- 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
- 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
- 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
- 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
- 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
- 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
- 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
- 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
- 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
- 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
- };
- #define NGRID_IQ2XXS 512
- // note: the grid size constant is shared with iq2_xxs; the iq1_s grid also has 512 entries
- static const uint64_t iq1s_grid[NGRID_IQ2XXS] = {
- 0xffffffffffff0101, 0xffffffffff01ff00, 0xffffffffff010100, 0xffffffff00000000,
- 0xffffffff01ff00ff, 0xffffffff01ff0001, 0xffffffff0101ffff, 0xffffffff0101ff01,
- 0xffffff00ff000000, 0xffffff000000ff00, 0xffffff00000000ff, 0xffffff0000000100,
- 0xffffff0000010000, 0xffffff0001000000, 0xffffff01ffff00ff, 0xffffff01ff01ff00,
- 0xffffff01ff010100, 0xffffff0100000001, 0xffffff0101ffff00, 0xffffff0101ff0101,
- 0xffffff0101010100, 0xffff00ffff00ff01, 0xffff00ffff0000ff, 0xffff00ff00ff0100,
- 0xffff00ff0100ff00, 0xffff00ff010001ff, 0xffff0000ff0101ff, 0xffff000000ffff00,
- 0xffff000000000000, 0xffff00000001ff01, 0xffff000001000101, 0xffff0000010100ff,
- 0xffff0001ffff0100, 0xffff00010000ff00, 0xffff000100010101, 0xffff000101000000,
- 0xffff01ffffff0000, 0xffff01ffff01ffff, 0xffff01ffff010100, 0xffff01ff00000000,
- 0xffff01ff01ffffff, 0xffff01ff01ff0001, 0xffff01ff0101ffff, 0xffff01ff01010001,
- 0xffff0100ffffff01, 0xffff01000000ffff, 0xffff010000000100, 0xffff010001ff01ff,
- 0xffff010001000000, 0xffff0101ff000000, 0xffff0101000101ff, 0xffff010101ffff01,
- 0xffff01010101ff00, 0xff00ffffff000000, 0xff00ffff00ffff00, 0xff00ffff00000001,
- 0xff00ffff000001ff, 0xff00ffff01010000, 0xff00ff00ffff0000, 0xff00ff00ff00ff00,
- 0xff00ff00ff0000ff, 0xff00ff00ff000100, 0xff00ff00ff010001, 0xff00ff0000ff0001,
- 0xff00ff000000ffff, 0xff00ff0000000000, 0xff00ff000001ff00, 0xff00ff0000010100,
- 0xff00ff0001ff0000, 0xff00ff000100ff00, 0xff00ff0001000100, 0xff00ff01ff000000,
- 0xff00ff0100ff0000, 0xff00ff01000001ff, 0xff00ff0101010001, 0xff0000ff00000000,
- 0xff0000ff0001ff00, 0xff0000ff00010100, 0xff000000ffff0101, 0xff000000ff000000,
- 0xff000000ff01ff00, 0xff00000000ff0000, 0xff0000000000ff00, 0xff000000000000ff,
- 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, 0xff0000000001ffff,
- 0xff00000000010000, 0xff00000001000000, 0xff00000001010100, 0xff000001ff00ff01,
- 0xff000001ff0100ff, 0xff00000100000000, 0xff0000010001ff00, 0xff00000101ff0100,
- 0xff0000010100ff00, 0xff0001ff00ff00ff, 0xff0001ff00000101, 0xff0001ff000100ff,
- 0xff0001ff01000000, 0xff000100ff0001ff, 0xff0001000000ff01, 0xff00010000000000,
- 0xff00010000010001, 0xff00010000010100, 0xff00010001ffff00, 0xff00010001ff0101,
- 0xff00010001010000, 0xff000101ffffffff, 0xff000101ff000101, 0xff00010101ff00ff,
- 0xff00010101000001, 0xff000101010100ff, 0xff01ffffff000101, 0xff01ffffff01ffff,
- 0xff01ffffff01ff01, 0xff01ffffff0101ff, 0xff01ffff00000000, 0xff01ffff01ff0001,
- 0xff01ffff0101ff01, 0xff01ff00ff000000, 0xff01ff0000ff0100, 0xff01ff000000ff01,
- 0xff01ff0000010000, 0xff01ff00010000ff, 0xff01ff01ff01ff00, 0xff01ff0100000101,
- 0xff0100ffffff0000, 0xff0100ffff010000, 0xff0100ff01ff00ff, 0xff0100ff01000100,
- 0xff0100ff010100ff, 0xff010000ffffff01, 0xff01000000000000, 0xff0100000101ff00,
- 0xff010001ffff00ff, 0xff010001ff000100, 0xff01000100ffff00, 0xff01000100010001,
- 0xff01000101ff0001, 0xff010001010001ff, 0xff0101ffffffffff, 0xff0101ffff01ffff,
- 0xff0101ffff010101, 0xff0101ff0000ff00, 0xff0101ff01010001, 0xff010100ff000000,
- 0xff010100ff01ff01, 0xff01010000ff0001, 0xff01010000000100, 0xff01010001000000,
- 0xff0101010100ffff, 0x00ffffff0000ff01, 0x00ffffff000000ff, 0x00ffffff00000100,
- 0x00ffffff00010000, 0x00ffff00ffff0001, 0x00ffff00ff0000ff, 0x00ffff00ff000100,
- 0x00ffff0000000000, 0x00ffff0001000100, 0x00ffff0001010001, 0x00ffff01ff00ff01,
- 0x00ffff0100ff0100, 0x00ffff010000ff00, 0x00ffff01000100ff, 0x00ffff0101ff00ff,
- 0x00ffff010101ff00, 0x00ff00ffffffffff, 0x00ff00ffffff01ff, 0x00ff00ffff000101,
- 0x00ff00ff00000000, 0x00ff00ff000101ff, 0x00ff00ff01010101, 0x00ff0000ff000000,
- 0x00ff0000ff01ffff, 0x00ff000000ff0000, 0x00ff00000000ff00, 0x00ff0000000000ff,
- 0x00ff000000000000, 0x00ff000000000001, 0x00ff000000000100, 0x00ff000000010000,
- 0x00ff000001ffff01, 0x00ff000001000000, 0x00ff0001ff000101, 0x00ff000100ffffff,
- 0x00ff000100000000, 0x00ff0001010001ff, 0x00ff01ffff000000, 0x00ff01ff0001ff00,
- 0x00ff01ff01ff0100, 0x00ff0100ff01ff01, 0x00ff010000ff00ff, 0x00ff010000ff0101,
- 0x00ff010000000000, 0x00ff010000010101, 0x00ff01000100ff00, 0x00ff010001010000,
- 0x00ff0101ffffff00, 0x00ff01010000ff01, 0x00ff010100000100, 0x00ff010101ff0000,
- 0x0000ffffffff0100, 0x0000ffffff00ff00, 0x0000ffffff0000ff, 0x0000ffffff010000,
- 0x0000ffff00000000, 0x0000ffff00010101, 0x0000ffff01ffff01, 0x0000ffff01000100,
- 0x0000ff00ff000000, 0x0000ff00ff01ff00, 0x0000ff00ff0101ff, 0x0000ff0000ff0000,
- 0x0000ff000000ff00, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001,
- 0x0000ff0000000100, 0x0000ff0000010000, 0x0000ff0001ffffff, 0x0000ff0001ff01ff,
- 0x0000ff0001000000, 0x0000ff000101ffff, 0x0000ff01ffff0101, 0x0000ff01ff010000,
- 0x0000ff0100000000, 0x0000ff0101000101, 0x000000ffffff0001, 0x000000ffff000000,
- 0x000000ff00ff0000, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000,
- 0x000000ff00000001, 0x000000ff00000100, 0x000000ff00010000, 0x000000ff01000000,
- 0x000000ff0101ff00, 0x00000000ffff0000, 0x00000000ff00ff00, 0x00000000ff0000ff,
- 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff010000,
- 0x0000000000ffff00, 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001,
- 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01,
- 0x00000000000000ff, 0x0000000000000001, 0x00000000000001ff, 0x0000000000000100,
- 0x0000000000000101, 0x000000000001ff00, 0x00000000000100ff, 0x0000000000010000,
- 0x0000000000010001, 0x0000000000010100, 0x0000000001ff0000, 0x000000000100ff00,
- 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, 0x0000000001000100,
- 0x0000000001010000, 0x00000001ffff01ff, 0x00000001ff000000, 0x0000000100ff0000,
- 0x000000010000ff00, 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001,
- 0x0000000100000100, 0x0000000100010000, 0x0000000101000000, 0x000001ffff00ff00,
- 0x000001ffff010001, 0x000001ffff0101ff, 0x000001ff00ffff01, 0x000001ff0000ffff,
- 0x000001ff00000000, 0x000001ff010000ff, 0x000001ff01010100, 0x00000100ffff0100,
- 0x00000100ff000000, 0x0000010000ff0000, 0x000001000000ff00, 0x00000100000000ff,
- 0x0000010000000000, 0x0000010000000001, 0x0000010000000100, 0x0000010000010000,
- 0x0000010001000000, 0x000001000101ff01, 0x00000101ffff0001, 0x00000101ff01ffff,
- 0x0000010100000000, 0x0000010101010100, 0x0001ffffff000000, 0x0001ffff00ffffff,
- 0x0001ffff00000100, 0x0001ffff0001ff00, 0x0001ffff01000000, 0x0001ff00ffffff00,
- 0x0001ff00ffff01ff, 0x0001ff00ff010000, 0x0001ff0000000000, 0x0001ff0000010001,
- 0x0001ff0001ff0000, 0x0001ff0001010100, 0x0001ff01ff0000ff, 0x0001ff01ff000001,
- 0x0001ff0100ffffff, 0x0001ff010001ffff, 0x0001ff01000101ff, 0x0001ff010100ff01,
- 0x000100ffff00ffff, 0x000100ffff00ff01, 0x000100ffff000100, 0x000100ff00000000,
- 0x000100ff000101ff, 0x000100ff01ff0101, 0x000100ff0100ffff, 0x000100ff01010101,
- 0x00010000ff000000, 0x00010000ff010100, 0x0001000000ff0000, 0x000100000000ff00,
- 0x00010000000000ff, 0x0001000000000000, 0x0001000000000001, 0x0001000000000100,
- 0x0001000000010000, 0x0001000001ffff01, 0x0001000001000000, 0x0001000100ff0101,
- 0x0001000100000000, 0x00010001010100ff, 0x000101ffffff01ff, 0x000101ffffff0101,
- 0x000101ff00010000, 0x000101ff01ff0000, 0x000101ff0100ff01, 0x00010100ffff0000,
- 0x0001010000000000, 0x000101000001ffff, 0x0001010000010101, 0x00010100010001ff,
- 0x00010101ff00ff00, 0x00010101ff010001, 0x0001010100ffffff, 0x0001010100ff01ff,
- 0x00010101000101ff, 0x0001010101ff0000, 0x000101010100ff01, 0x0001010101000101,
- 0x01ffffffffff0101, 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff,
- 0x01ffffffff010101, 0x01ffffff00000000, 0x01ffffff01ff01ff, 0x01ffffff01000101,
- 0x01ffffff0101ff01, 0x01ffffff010100ff, 0x01ffff000000ff00, 0x01ffff0000000001,
- 0x01ffff00000001ff, 0x01ffff0000010000, 0x01ffff0001ff0000, 0x01ffff01ffffffff,
- 0x01ffff01ffff01ff, 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff0101ff,
- 0x01ffff010100ffff, 0x01ff00ffffff0000, 0x01ff00ffff010000, 0x01ff00ff00ffff01,
- 0x01ff0000ff0000ff, 0x01ff000000000000, 0x01ff00000001ff01, 0x01ff000001ffffff,
- 0x01ff000001010100, 0x01ff0001ffffff01, 0x01ff0001ff010001, 0x01ff000101ff0100,
- 0x01ff000101000001, 0x01ff0001010100ff, 0x01ff01ffff00ffff, 0x01ff01ff00010001,
- 0x01ff01ff01000000, 0x01ff01ff010101ff, 0x01ff0100ff000001, 0x01ff010000ffff00,
- 0x01ff010000000100, 0x01ff010001ff01ff, 0x01ff01000101ffff, 0x01ff0101ffff00ff,
- 0x01ff0101ffff0101, 0x01ff0101ff0101ff, 0x01ff010100010000, 0x0100ffff00ff00ff,
- 0x0100ffff00ff0001, 0x0100ffff00000100, 0x0100ffff0100ff00, 0x0100ff00ffff0000,
- 0x0100ff00ff00ffff, 0x0100ff00ff00ff01, 0x0100ff00ff000100, 0x0100ff00ff010000,
- 0x0100ff0000000000, 0x0100ff00000100ff, 0x0100ff0001ff0101, 0x0100ff0001010101,
- 0x0100ff0100ff00ff, 0x0100ff0100ff0001, 0x0100ff0100000100, 0x0100ff0100010001,
- 0x0100ff0101000000, 0x010000ffff00ff00, 0x010000ff0000ffff, 0x010000ff00000000,
- 0x010000ff010001ff, 0x010000ff01010001, 0x01000000ffffff00, 0x01000000ffff0101,
- 0x01000000ff000000, 0x01000000ff0100ff, 0x01000000ff010101, 0x0100000000ff0000,
- 0x010000000000ff00, 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001,
- 0x0100000000000100, 0x0100000000010000, 0x0100000001000000, 0x0100000100000000,
- 0x01000001000101ff, 0x0100000101ffff01, 0x010001ffff000101, 0x010001ff00ff0100,
- 0x010001ff0000ff00, 0x010001ff000100ff, 0x010001ff01ffffff, 0x01000100ffff0000,
- 0x01000100ff0001ff, 0x0100010000000000, 0x010001000001ff00, 0x0100010001ff0000,
- 0x01000100010000ff, 0x0100010001000101, 0x01000101ff00ff01, 0x0100010100ff0100,
- 0x010001010000ffff, 0x0100010101010001, 0x0101ffffffff0101, 0x0101ffffff0001ff,
- 0x0101ffffff01ffff, 0x0101ffffff010101, 0x0101ffff00000000, 0x0101ffff0101ffff,
- 0x0101ffff010101ff, 0x0101ff00ff000000, 0x0101ff0000ff0100, 0x0101ff000000ff00,
- 0x0101ff0000010000, 0x0101ff00010000ff, 0x0101ff0001000001, 0x0101ff01ff010101,
- 0x0101ff0100000000, 0x0101ff010101ff00, 0x010100ffffff0000, 0x010100ffff010000,
- 0x010100ff00ff01ff, 0x010100ff000000ff, 0x010100ff00000101, 0x010100ff01ffff00,
- 0x01010000ffffff01, 0x01010000ff000100, 0x01010000ff01ff01, 0x0101000000000000,
- 0x01010000000100ff, 0x010100000101ff01, 0x01010001ffff0000, 0x01010001ff00ffff,
- 0x01010001ff010000, 0x0101000101ffffff, 0x0101000101ff01ff, 0x0101000101010101,
- 0x010101ffff01ffff, 0x010101ff00000000, 0x010101ff0001ff01, 0x010101ff0101ffff,
- 0x010101ff010101ff, 0x01010100ffffffff, 0x01010100ff000001, 0x010101000000ff00,
- 0x0101010001010000, 0x0101010100ff0001, 0x010101010001ff01, 0x010101010101ffff,
- };
- static const uint8_t ksigns_iq2xs[128] = {
- 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15,
- 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159,
- 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175,
- 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63,
- 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207,
- 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95,
- 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111,
- 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
- };
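- // ksigns_iq2xs stores 7 explicit sign bits per entry and uses bit 7 as an
- // even-parity bit, so the 8th sign comes for free. A sketch of how the
- // table could be generated (illustrative, assuming a GCC/Clang builtin):
- //
- //     for (int i = 0; i < 128; ++i) {
- //         ksigns[i] = (uint8_t)(i | ((__builtin_popcount(i) & 1) << 7));
- //     }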
- static const uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128};
- void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- uint32_t aux32[2];
- const uint8_t * aux8 = (const uint8_t *)aux32;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
- const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
- for (int j = 0; j < 8; ++j) {
- y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
- }
- y += 8;
- }
- }
- }
- }
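- // Layout recap for the loop above: each 32-value sub-block of iq2_xxs takes
- // 8 bytes of qs. The first 4 bytes (aux8[0..3]) are grid indices for four
- // 8-value groups; the second 4 bytes (aux32[1]) hold 4 x 7 sign bits in the
- // low 28 bits and a 4-bit scale in the top 4 bits, giving
- // db = d * (0.5f + scale) * 0.25f.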
- // ====================== 2.3125 bpw (de)-quantization
- void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- float db[2];
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
- db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
- const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
- for (int j = 0; j < 8; ++j) {
- y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
- }
- y += 8;
- }
- }
- }
- }
- // ====================== 3.0625 bpw (de)-quantization
- void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- uint32_t aux32;
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * qs = x[i].qs;
- const uint8_t * scales_and_signs = qs + QK_K/4;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
- const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
- for (int l = 0; l < 4; ++l) {
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
- for (int j = 0; j < 4; ++j) {
- y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
- y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
- }
- y += 8;
- }
- qs += 8;
- }
- }
- }
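- // iq3_xxs arranges its payload differently: the first QK_K/4 bytes of qs
- // are 8-bit grid indices (two per 8-value group), followed by QK_K/32
- // uint32 words carrying the 28 sign bits and the 4-bit scale per sub-block,
- // with db = d * (0.5f + scale) * 0.5f.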
- // ====================== 1.5625 bpw (de)-quantization
- void dequantize_row_iq1_s(const block_iq1_s * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- float db[4];
- uint16_t idx[4];
- //const int8_t * grid[4];
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * sc = x[i].scales;
- const uint8_t * qs = x[i].qs;
- for (int i8 = 0; i8 < QK_K/8; i8 += 4) {
- idx[0] = qs[0] | ((sc[0] & 0x08) << 5);
- idx[1] = qs[1] | ((sc[0] & 0x80) << 1);
- idx[2] = qs[2] | ((sc[1] & 0x08) << 5);
- idx[3] = qs[3] | ((sc[1] & 0x80) << 1);
- //grid[0] = (const int8_t *)(iq1s_grid + (qs[0] | ((sc[0] & 0x08) << 5)));
- //grid[1] = (const int8_t *)(iq1s_grid + (qs[1] | ((sc[0] & 0x80) << 1)));
- //grid[2] = (const int8_t *)(iq1s_grid + (qs[2] | ((sc[1] & 0x08) << 5)));
- //grid[3] = (const int8_t *)(iq1s_grid + (qs[3] | ((sc[1] & 0x80) << 1)));
- db[0] = d * (2*(sc[0] & 7) + 1);
- db[1] = d * (2*((sc[0] >> 4) & 7) + 1);
- db[2] = d * (2*(sc[1] & 7) + 1);
- db[3] = d * (2*((sc[1] >> 4) & 7) + 1);
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
- for (int j = 0; j < 8; ++j) {
- //y[j] = db[l] * grid[l][j];
- y[j] = db[l] * grid[j];
- }
- y += 8;
- }
- qs += 4;
- sc += 2;
- }
- }
- }
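- // In iq1_s each 8-value group is addressed by a 9-bit grid index: 8 bits
- // from qs plus one bit borrowed from the scale byte (mask 0x08 or 0x80,
- // shifted into bit 8). The remaining 3-bit scale s gives db = d * (2*s + 1).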
- //===================================== Q8_K ==============================================
- void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- float max = 0;
- float amax = 0;
- for (int j = 0; j < QK_K; ++j) {
- float ax = fabsf(x[j]);
- if (ax > amax) {
- amax = ax; max = x[j];
- }
- }
- if (!amax) {
- y[i].d = 0;
- memset(y[i].qs, 0, QK_K);
- x += QK_K;
- continue;
- }
- //const float iscale = -128.f/max;
- // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward
- const float iscale = -127.f/max;
- for (int j = 0; j < QK_K; ++j) {
- int v = nearest_int(iscale*x[j]);
- y[i].qs[j] = MIN(127, v);
- }
- for (int j = 0; j < QK_K/16; ++j) {
- int sum = 0;
- for (int ii = 0; ii < 16; ++ii) {
- sum += y[i].qs[j*16 + ii];
- }
- y[i].bsums[j] = sum;
- }
- y[i].d = 1/iscale;
- x += QK_K;
- }
- }
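- // The per-16 bsums stored above let K-quant dot products fold the block
- // minimum into one multiply: since sum((q - m) * y) = sum(q * y) - m * sum(y),
- // a kernel only needs these precomputed group sums of y rather than a
- // second pass over the activations.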
- void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- for (int j = 0; j < QK_K; ++j) {
- *y++ = x[i].d * x[i].qs[j];
- }
- }
- }
- void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
- quantize_row_q8_K_reference(x, y, k);
- }
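- // Round-trip sketch (illustrative only): quantizing and dequantizing one block
- // recovers the input to within about half a quantization step.
- //
- //   float src[QK_K], dst[QK_K];
- //   block_q8_K blk;
- //   quantize_row_q8_K_reference(src, &blk, QK_K); // blk.d = -max/127 (max keeps its sign)
- //   dequantize_row_q8_K(&blk, dst, QK_K);         // dst[j] ~= src[j], |err| <~ |blk.d|/2
- //
- // The per-16 bsums are precomputed here so that the k-quant dot products below
- // (e.g. q2_K) can fold the block minimums in with a single madd against y[i].bsums.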
- //===================================== Dot products =================================
- //
- // Helper functions
- //
- #if __AVX__ || __AVX2__ || __AVX512F__
- // shuffles to pick the required scales in dot products
- static inline __m256i get_scale_shuffle_q3k(int i) {
- static const uint8_t k_shuffle[128] = {
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
- 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
- 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
- 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
- };
- return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
- }
- static inline __m256i get_scale_shuffle_k4(int i) {
- static const uint8_t k_shuffle[256] = {
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
- 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
- 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
- 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
- 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
- 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
- 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
- 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
- };
- return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
- }
- static inline __m128i get_scale_shuffle(int i) {
- static const uint8_t k_shuffle[128] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
- 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
- 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
- 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
- };
- return _mm_loadu_si128((const __m128i*)k_shuffle + i);
- }
- #endif
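- // Scalar view of the tables above (a sketch): the k-quant kernels below duplicate
- // their 16 bytes of int16 scales into both 128-bit lanes, so _mm256_shuffle_epi8
- // with the mask from get_scale_shuffle_q3k(i) broadcasts one scale per lane; in
- // 16-bit terms,
- //
- //   out_word[w] = scales16[2*i + w/8];   // w = 0..15, i = 0..3
- //
- // get_scale_shuffle_k4(i) instead repeats the single scale i across all 16 words,
- // and get_scale_shuffle(i) is the byte-granularity analogue for the 128-bit
- // kernels (byte 2*i across the low 8 bytes, byte 2*i + 1 across the high 8).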
- void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bbx, const void * restrict vy, size_t bby, int nrc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- assert(n % qk == 0);
- #if defined(__ARM_FEATURE_MATMUL_INT8)
- assert((nrc == 2) || (nrc == 1));
- #else
- assert(nrc == 1);
- #endif
- UNUSED(nrc);
- UNUSED(bbx);
- UNUSED(bby);
- UNUSED(bs);
- const block_q4_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
- #if defined(__ARM_FEATURE_MATMUL_INT8)
- if (nrc == 2) {
- const block_q4_0 * restrict vx0 = vx;
- const block_q4_0 * restrict vx1 = vx + bbx;
- const block_q8_0 * restrict vy0 = vy;
- const block_q8_0 * restrict vy1 = vy + bby;
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- for (int i = 0; i < nb; i++) {
- const block_q4_0 * restrict b_x0 = &vx0[i];
- const block_q4_0 * restrict b_x1 = &vx1[i];
- const block_q8_0 * restrict b_y0 = &vy0[i];
- const block_q8_0 * restrict b_y1 = &vy1[i];
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const int8x16_t s8b = vdupq_n_s8(0x8);
- const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // sub 8
- const int8x16_t x0_l = vsubq_s8(v0_0l, s8b);
- const int8x16_t x0_h = vsubq_s8(v0_0h, s8b);
- const int8x16_t x1_l = vsubq_s8(v0_1l, s8b);
- const int8x16_t x1_h = vsubq_s8(v0_1h, s8b);
- // load y
- const int8x16_t y0_l = vld1q_s8(b_y0->qs);
- const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
- const int8x16_t y1_l = vld1q_s8(b_y1->qs);
- const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
- float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
- GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
- GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
- GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
- int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
- int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
- int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
- int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
- int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
- int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
- int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
- int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
- sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
- l1, r1)), l2, r2)), l3, r3))), scale);
- }
- float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
- float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
- vst1_f32(s, vget_low_f32(sumv2));
- vst1_f32(s + bs, vget_high_f32(sumv2));
- return;
- }
- #endif
- #if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
- assert(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q4_0 * restrict x0 = &x[i + 0];
- const block_q4_0 * restrict x1 = &x[i + 1];
- const block_q8_0 * restrict y0 = &y[i + 0];
- const block_q8_0 * restrict y1 = &y[i + 1];
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const int8x16_t s8b = vdupq_n_s8(0x8);
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // sub 8
- const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
- const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
- const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
- const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
- // dot product into int32x4_t
- const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
- const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
- }
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
- #elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- // Main loop
- for (int i = 0; i < nb; ++i) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
- __m256i qx = bytes_from_nibbles_32(x[i].qs);
- // Now we have a vector with bytes in the [ 0 .. 15 ] interval. Offset them into the [ -8 .. +7 ] interval.
- const __m256i off = _mm256_set1_epi8( 8 );
- qx = _mm256_sub_epi8( qx, off );
- __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
- const __m256 q = mul_sum_i8_pairs_float(qx, qy);
- /* Multiply q with scale and accumulate */
- acc = _mm256_fmadd_ps( d, q, acc );
- }
- *s = hsum_float_8(acc);
- #elif defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- // Main loop
- for (int i = 0; i < nb; ++i) {
- // Compute combined scale for the block
- const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
- const __m128i lowMask = _mm_set1_epi8(0xF);
- const __m128i off = _mm_set1_epi8(8);
- const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
- __m128i bx = _mm_and_si128(lowMask, tmp);
- __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
- bx = _mm_sub_epi8(bx, off);
- const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
- bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
- by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
- bx = _mm_sub_epi8(bx, off);
- const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
- // Convert int32_t to float
- __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
- // Apply the scale, and accumulate
- acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined(__SSSE3__)
- // set constants
- const __m128i lowMask = _mm_set1_epi8(0xF);
- const __m128i off = _mm_set1_epi8(8);
- // Initialize accumulator with zeros
- __m128 acc_0 = _mm_setzero_ps();
- __m128 acc_1 = _mm_setzero_ps();
- __m128 acc_2 = _mm_setzero_ps();
- __m128 acc_3 = _mm_setzero_ps();
- // First round without accumulation
- {
- _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
- // Compute combined scale for block 0 (nibble halves 0 and 1)
- const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
- const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
- __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
- __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
- bx_0 = _mm_sub_epi8(bx_0, off);
- const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
- __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
- __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
- bx_1 = _mm_sub_epi8(bx_1, off);
- const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
- _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
- // Compute combined scale for block 1 (nibble halves 2 and 3)
- const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
- const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
- __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
- __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
- bx_2 = _mm_sub_epi8(bx_2, off);
- const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
- __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
- __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
- bx_3 = _mm_sub_epi8(bx_3, off);
- const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
- // Convert int32_t to float
- __m128 p0 = _mm_cvtepi32_ps(i32_0);
- __m128 p1 = _mm_cvtepi32_ps(i32_1);
- __m128 p2 = _mm_cvtepi32_ps(i32_2);
- __m128 p3 = _mm_cvtepi32_ps(i32_3);
- // Apply the scale
- acc_0 = _mm_mul_ps( d_0_1, p0 );
- acc_1 = _mm_mul_ps( d_0_1, p1 );
- acc_2 = _mm_mul_ps( d_2_3, p2 );
- acc_3 = _mm_mul_ps( d_2_3, p3 );
- }
- assert(nb % 2 == 0); // TODO: handle odd nb
- // Main loop
- for (int i = 2; i < nb; i+=2) {
- _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
- // Compute combined scale for block i (nibble halves 0 and 1)
- const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
- const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
- __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
- __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
- bx_0 = _mm_sub_epi8(bx_0, off);
- const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
- __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
- __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
- bx_1 = _mm_sub_epi8(bx_1, off);
- const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
- _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
- // Compute combined scale for block i + 1 (nibble halves 2 and 3)
- const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
- const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
- __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
- __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
- bx_2 = _mm_sub_epi8(bx_2, off);
- const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
- __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
- __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
- bx_3 = _mm_sub_epi8(bx_3, off);
- const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
- // Convert int32_t to float
- __m128 p0 = _mm_cvtepi32_ps(i32_0);
- __m128 p1 = _mm_cvtepi32_ps(i32_1);
- __m128 p2 = _mm_cvtepi32_ps(i32_2);
- __m128 p3 = _mm_cvtepi32_ps(i32_3);
- // Apply the scale
- __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
- __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
- __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
- __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
- // Accumulate
- acc_0 = _mm_add_ps(p0_d, acc_0);
- acc_1 = _mm_add_ps(p1_d, acc_1);
- acc_2 = _mm_add_ps(p2_d, acc_2);
- acc_3 = _mm_add_ps(p3_d, acc_3);
- }
- *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
- #elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
- for (int i = 0; i < nb; i++) {
- // load elements
- vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
- vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
- vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
- // mask and store lower part of x, and then upper part
- vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
- vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
- vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
- vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
- // subtract offset
- vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
- vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
- vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
- vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
- sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
- }
- *s = sumf;
- #else
- // scalar
- float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- int sumi = 0;
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[i].qs[j] & 0x0F) - 8;
- const int v1 = (x[i].qs[j] >> 4) - 8;
- sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
- }
- sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
- }
- *s = sumf;
- #endif
- }
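- // Why a single fused scale suffices (sketch): with x[j] ~= d_x*(qx[j] - 8) and
- // y[j] ~= d_y*qy[j], the per-block dot product factors as
- //
- //   sum_j x[j]*y[j] ~= d_x*d_y * sum_j (qx[j] - 8)*qy[j]
- //
- // which is exactly the integer sumi of the scalar branch times the combined
- // scale; every SIMD branch above computes the same quantity and differs only
- // in how the nibbles are unpacked and where the offset of 8 is applied.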
- void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bbx, const void * restrict vy, size_t bby, int nrc) {
- const int qk = QK8_1;
- const int nb = n / qk;
- assert(n % qk == 0);
- #if defined(__ARM_FEATURE_MATMUL_INT8)
- assert((nrc == 2) || (nrc == 1));
- #else
- assert(nrc == 1);
- #endif
- UNUSED(nrc);
- UNUSED(bbx);
- UNUSED(bby);
- UNUSED(bs);
- const block_q4_1 * restrict x = vx;
- const block_q8_1 * restrict y = vy;
- #if defined(__ARM_FEATURE_MATMUL_INT8)
- if (nrc == 2) {
- const block_q4_1 * restrict vx0 = vx;
- const block_q4_1 * restrict vx1 = vx + bbx;
- const block_q8_1 * restrict vy0 = vy;
- const block_q8_1 * restrict vy1 = vy + bby;
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t summs0 = vdupq_n_f32(0.0f);
- for (int i = 0; i < nb; i++) {
- const block_q4_1 * restrict b_x0 = &vx0[i];
- const block_q4_1 * restrict b_x1 = &vx1[i];
- const block_q8_1 * restrict b_y0 = &vy0[i];
- const block_q8_1 * restrict b_y1 = &vy1[i];
- float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * b_y0->s,
- GGML_FP16_TO_FP32(b_x1->m) * b_y0->s,
- GGML_FP16_TO_FP32(b_x0->m) * b_y1->s,
- GGML_FP16_TO_FP32(b_x1->m) * b_y1->s};
- summs0 = vaddq_f32(summs0, summs_t);
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
- // 4-bit -> 8-bit
- const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // load y
- const int8x16_t y0_l = vld1q_s8(b_y0->qs);
- const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
- const int8x16_t y1_l = vld1q_s8(b_y1->qs);
- const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
- // mmla into int32x4_t
- float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
- GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
- GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
- GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
- int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
- int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
- int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
- int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
- int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
- int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
- int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
- int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
- sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
- l1, r1)), l2, r2)), l3, r3))), scale);
- }
- float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
- float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
- sumv2 = vaddq_f32(sumv2, summs0);
- vst1_f32(s, vget_low_f32(sumv2));
- vst1_f32(s + bs, vget_high_f32(sumv2));
- return;
- }
- #endif
- // TODO: add WASM SIMD
- #if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
- float summs = 0;
- assert(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q4_1 * restrict x0 = &x[i + 0];
- const block_q4_1 * restrict x1 = &x[i + 1];
- const block_q8_1 * restrict y0 = &y[i + 0];
- const block_q8_1 * restrict y1 = &y[i + 1];
- summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
- // dot product into int32x4_t
- const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
- const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
- }
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
- #elif defined(__AVX2__) || defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- float summs = 0;
- // Main loop
- for (int i = 0; i < nb; ++i) {
- const float d0 = GGML_FP16_TO_FP32(x[i].d);
- const float d1 = y[i].d;
- summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
- const __m256 d0v = _mm256_set1_ps( d0 );
- const __m256 d1v = _mm256_set1_ps( d1 );
- // Compute combined scales
- const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
- // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
- const __m256i qx = bytes_from_nibbles_32(x[i].qs);
- const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[i].qs );
- const __m256 xy = mul_sum_us8_pairs_float(qx, qy);
- // Accumulate d0*d1*x*y
- #if defined(__AVX2__)
- acc = _mm256_fmadd_ps( d0d1, xy, acc );
- #else
- acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
- #endif
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
- for (int i = 0; i < nb; i++) {
- // load elements
- vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
- vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
- vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
- // mask and store lower part of x, and then upper part
- vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
- vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
- vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
- vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
- vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
- vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
- }
- *s = sumf;
- #else
- // scalar
- float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- int sumi = 0;
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[i].qs[j] & 0x0F);
- const int v1 = (x[i].qs[j] >> 4);
- sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
- }
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
- }
- *s = sumf;
- #endif
- }
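- // The affine identity behind summs (sketch): with x[j] ~= d_x*qx[j] + m_x and
- // y[j] ~= d_y*qy[j],
- //
- //   sum_j x[j]*y[j] ~= d_x*d_y * sum_j qx[j]*qy[j] + m_x * (d_y * sum_j qy[j])
- //
- // block_q8_1 stores s = d * sum(qs) at quantization time, so the second term
- // reduces to m_x * y[i].s -- the summs accumulated once per block in every
- // branch above.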
- void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bbx, const void * restrict vy, size_t bby, int nrc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- assert(n % qk == 0);
- assert(qk == QK5_0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bbx);
- UNUSED(bby);
- UNUSED(bs);
- const block_q5_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
- #if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
- uint32_t qh0;
- uint32_t qh1;
- uint64_t tmp0[4];
- uint64_t tmp1[4];
- assert(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q5_0 * restrict x0 = &x[i];
- const block_q5_0 * restrict x1 = &x[i + 1];
- const block_q8_0 * restrict y0 = &y[i];
- const block_q8_0 * restrict y1 = &y[i + 1];
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- // extract the 5th bit via lookup table ((!b) << 4)
- memcpy(&qh0, x0->qh, sizeof(qh0));
- memcpy(&qh1, x1->qh, sizeof(qh1));
- tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
- tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
- tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
- tmp0[3] = table_b2b_1[(qh0 >> 24) ];
- tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
- tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
- tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
- tmp1[3] = table_b2b_1[(qh1 >> 24) ];
- const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
- const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
- const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
- const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
- // 4-bit -> 8-bit
- int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
- const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
- const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
- const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
- const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
- ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
- ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
- ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
- ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
- }
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
- #elif defined(__wasm_simd128__)
- v128_t sumv = wasm_f32x4_splat(0.0f);
- uint32_t qh;
- uint64_t tmp[4];
- // TODO: check if unrolling this is better
- for (int i = 0; i < nb; ++i) {
- const block_q5_0 * restrict x0 = &x[i];
- const block_q8_0 * restrict y0 = &y[i];
- const v128_t m4b = wasm_i8x16_splat(0x0F);
- // extract the 5th bit
- memcpy(&qh, x0->qh, sizeof(qh));
- tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_1[(qh >> 24) ];
- const v128_t qhl = wasm_v128_load(tmp + 0);
- const v128_t qhh = wasm_v128_load(tmp + 2);
- const v128_t v0 = wasm_v128_load(x0->qs);
- // 4-bit -> 8-bit
- const v128_t v0l = wasm_v128_and (v0, m4b);
- const v128_t v0h = wasm_u8x16_shr(v0, 4);
- // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
- const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
- const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
- // load y
- const v128_t v1l = wasm_v128_load(y0->qs);
- const v128_t v1h = wasm_v128_load(y0->qs + 16);
- // int8x16 -> int16x8
- const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
- const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
- const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
- const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
- const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
- const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
- const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
- const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
- // dot product
- sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
- wasm_i32x4_add(
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
- wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
- wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
- wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
- }
- *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
- wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
- #elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- // Main loop
- for (int i = 0; i < nb; i++) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
- __m256i qx = bytes_from_nibbles_32(x[i].qs);
- __m256i bxhi = bytes_from_bits_32(x[i].qh);
- bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
- qx = _mm256_or_si256(qx, bxhi);
- __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
- const __m256 q = mul_sum_i8_pairs_float(qx, qy);
- /* Multiply q with scale and accumulate */
- acc = _mm256_fmadd_ps(d, q, acc);
- }
- *s = hsum_float_8(acc);
- #elif defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- __m128i mask = _mm_set1_epi8((char)0xF0);
- // Main loop
- for (int i = 0; i < nb; i++) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- const __m256i bxhi = bytes_from_bits_32(x[i].qh);
- __m128i bxhil = _mm256_castsi256_si128(bxhi);
- __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
- bxhil = _mm_andnot_si128(bxhil, mask);
- bxhih = _mm_andnot_si128(bxhih, mask);
- __m128i bxl = _mm256_castsi256_si128(bx);
- __m128i bxh = _mm256_extractf128_si256(bx, 1);
- bxl = _mm_or_si128(bxl, bxhil);
- bxh = _mm_or_si128(bxh, bxhih);
- bx = MM256_SET_M128I(bxh, bxl);
- const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
- const __m256 q = mul_sum_i8_pairs_float(bx, by);
- /* Multiply q with scale and accumulate */
- acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
- uint32_t qh;
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
- // These temporary registers are for masking and shift operations
- vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
- vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
- vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
- vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
- for (int i = 0; i < nb; i++) {
- memcpy(&qh, x[i].qh, sizeof(uint32_t));
- // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
- vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
- vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
- // ((qh & (1u << (j + 16))) >> (j + 12));
- vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
- vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
- // narrowing
- vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
- vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
- vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
- vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
- // load
- vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
- vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
- vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
- vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
- vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
- vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
- vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
- vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
- vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
- vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
- vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
- vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
- vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
- sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
- }
- *s = sumf;
- #else
- // scalar
- float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
- int sumi = 0;
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
- const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
- const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
- sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
- }
- sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
- }
- *s = sumf;
- #endif
- }
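- // Worked example of the 5-bit reconstruction in the scalar branch (sketch):
- // for j = 0 with low nibble qs[0] & 0x0F = 0x3 and high bit (qh & 1) = 1,
- // xh_0 = 1 << 4 = 0x10 and x0 = (0x3 | 0x10) - 16 = 3; with the bit clear,
- // x0 = 0x3 - 16 = -13. The NEON path reaches the same value the other way
- // around: table_b2b_1 expands each bit b to (!b) << 4, and
- // low4 - ((!b) << 4) = low4 + 16*b - 16.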
- void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bbx, const void * restrict vy, size_t bby, int nrc) {
- const int qk = QK8_1;
- const int nb = n / qk;
- assert(n % qk == 0);
- assert(qk == QK5_1);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bbx);
- UNUSED(bby);
- UNUSED(bs);
- const block_q5_1 * restrict x = vx;
- const block_q8_1 * restrict y = vy;
- #if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
- float summs0 = 0.0f;
- float summs1 = 0.0f;
- uint32_t qh0;
- uint32_t qh1;
- uint64_t tmp0[4];
- uint64_t tmp1[4];
- assert(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q5_1 * restrict x0 = &x[i];
- const block_q5_1 * restrict x1 = &x[i + 1];
- const block_q8_1 * restrict y0 = &y[i];
- const block_q8_1 * restrict y1 = &y[i + 1];
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
- summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
- // extract the 5th bit via lookup table ((b) << 4)
- memcpy(&qh0, x0->qh, sizeof(qh0));
- memcpy(&qh1, x1->qh, sizeof(qh1));
- tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
- tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
- tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
- tmp0[3] = table_b2b_0[(qh0 >> 24) ];
- tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
- tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
- tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
- tmp1[3] = table_b2b_0[(qh1 >> 24) ];
- const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
- const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
- const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
- const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // add high bit
- const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
- const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
- const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
- const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
- ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
- ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
- ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
- ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
- }
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
- #elif defined(__wasm_simd128__)
- v128_t sumv = wasm_f32x4_splat(0.0f);
- float summs = 0.0f;
- uint32_t qh;
- uint64_t tmp[4];
- // TODO: check if unrolling this is better
- for (int i = 0; i < nb; ++i) {
- const block_q5_1 * restrict x0 = &x[i];
- const block_q8_1 * restrict y0 = &y[i];
- summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
- const v128_t m4b = wasm_i8x16_splat(0x0F);
- // extract the 5th bit
- memcpy(&qh, x0->qh, sizeof(qh));
- tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_0[(qh >> 24) ];
- const v128_t qhl = wasm_v128_load(tmp + 0);
- const v128_t qhh = wasm_v128_load(tmp + 2);
- const v128_t v0 = wasm_v128_load(x0->qs);
- // 4-bit -> 8-bit
- const v128_t v0l = wasm_v128_and (v0, m4b);
- const v128_t v0h = wasm_u8x16_shr(v0, 4);
- // add high bit
- const v128_t v0lf = wasm_v128_or(v0l, qhl);
- const v128_t v0hf = wasm_v128_or(v0h, qhh);
- // load y
- const v128_t v1l = wasm_v128_load(y0->qs);
- const v128_t v1h = wasm_v128_load(y0->qs + 16);
- // int8x16 -> int16x8
- const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
- const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
- const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
- const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
- const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
- const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
- const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
- const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
- // dot product
- sumv = wasm_f32x4_add(sumv,
- wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
- wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
- wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
- wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
- }
- *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
- wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
- #elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- float summs = 0.0f;
- // Main loop
- for (int i = 0; i < nb; i++) {
- const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
- summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
- __m256i qx = bytes_from_nibbles_32(x[i].qs);
- __m256i bxhi = bytes_from_bits_32(x[i].qh);
- bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
- qx = _mm256_or_si256(qx, bxhi);
- const __m256 dy = _mm256_set1_ps(y[i].d);
- const __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
- const __m256 q = mul_sum_us8_pairs_float(qx, qy);
- acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- __m128i mask = _mm_set1_epi8(0x10);
- float summs = 0.0f;
- // Main loop
- for (int i = 0; i < nb; i++) {
- const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
- summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- const __m256i bxhi = bytes_from_bits_32(x[i].qh);
- __m128i bxhil = _mm256_castsi256_si128(bxhi);
- __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
- bxhil = _mm_and_si128(bxhil, mask);
- bxhih = _mm_and_si128(bxhih, mask);
- __m128i bxl = _mm256_castsi256_si128(bx);
- __m128i bxh = _mm256_extractf128_si256(bx, 1);
- bxl = _mm_or_si128(bxl, bxhil);
- bxh = _mm_or_si128(bxh, bxhih);
- bx = MM256_SET_M128I(bxh, bxl);
- const __m256 dy = _mm256_set1_ps(y[i].d);
- const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
- const __m256 q = mul_sum_us8_pairs_float(bx, by);
- acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
- uint32_t qh;
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
- // temporary registers for shift operations
- vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
- vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
- for (int i = 0; i < nb; i++) {
- memcpy(&qh, x[i].qh, sizeof(uint32_t));
- // load qh
- vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
- // ((qh >> (j + 0)) << 4) & 0x10;
- vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
- vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
- vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
- // ((qh >> (j + 12)) ) & 0x10;
- vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
- vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
- // narrowing
- vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
- vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
- vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
- vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
- // load
- vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
- vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
- vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
- vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
- vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
- vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
- vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
- vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
- vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
- vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
- vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
- }
- *s = sumf;
- #else
- // scalar
- float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
- int sumi = 0;
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
- const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
- sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
- }
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
- }
- *s = sumf;
- #endif
- }
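- // Why q5_0 and q5_1 use different lookup tables (sketch): table_b2b_1 expands a
- // qh bit b to (!b) << 4 so the high bit can be applied with a subtraction that
- // also folds in the -16 offset (low4 - ((!b) << 4) = low4 + 16*b - 16), while
- // table_b2b_0 expands b to (b) << 4 so it can simply be OR-ed in; q5_1 values
- // stay unsigned because the offset lives in the per-block m instead.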
- void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bbx, const void * restrict vy, size_t bby, int nrc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- assert(n % qk == 0);
- #if defined(__ARM_FEATURE_MATMUL_INT8)
- assert((nrc == 2) || (nrc == 1));
- #else
- assert(nrc == 1);
- #endif
- UNUSED(nrc);
- UNUSED(bbx);
- UNUSED(bby);
- UNUSED(bs);
- const block_q8_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
- #if defined(__ARM_FEATURE_MATMUL_INT8)
- if (nrc == 2) {
- const block_q8_0 * restrict vx0 = vx;
- const block_q8_0 * restrict vx1 = vx + bbx;
- const block_q8_0 * restrict vy0 = vy;
- const block_q8_0 * restrict vy1 = vy + bby;
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- for (int i = 0; i < nb; i++) {
- const block_q8_0 * restrict b_x0 = &vx0[i];
- const block_q8_0 * restrict b_y0 = &vy0[i];
- const block_q8_0 * restrict b_x1 = &vx1[i];
- const block_q8_0 * restrict b_y1 = &vy1[i];
- const int8x16_t x0_l = vld1q_s8(b_x0->qs);
- const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
- const int8x16_t x1_l = vld1q_s8(b_x1->qs);
- const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);
- // load y
- const int8x16_t y0_l = vld1q_s8(b_y0->qs);
- const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
- const int8x16_t y1_l = vld1q_s8(b_y1->qs);
- const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
- float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
- GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
- GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
- GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
- int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
- int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
- int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
- int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
- int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
- int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
- int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
- int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
- sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
- l1, r1)), l2, r2)), l3, r3))), scale);
- }
- float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
- float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
- vst1_f32(s, vget_low_f32(sumv2));
- vst1_f32(s + bs, vget_high_f32(sumv2));
- return;
- }
- #endif
- #if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
- assert(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q8_0 * restrict x0 = &x[i + 0];
- const block_q8_0 * restrict x1 = &x[i + 1];
- const block_q8_0 * restrict y0 = &y[i + 0];
- const block_q8_0 * restrict y1 = &y[i + 1];
- const int8x16_t x0_0 = vld1q_s8(x0->qs);
- const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
- const int8x16_t x1_0 = vld1q_s8(x1->qs);
- const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
- // load y
- const int8x16_t y0_0 = vld1q_s8(y0->qs);
- const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
- const int8x16_t y1_0 = vld1q_s8(y1->qs);
- const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
- ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
- ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
- ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
- ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
- }
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
- #elif defined(__AVX2__) || defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- // Main loop
- for (int i = 0; i < nb; ++i) {
- // Compute combined scale for the block
- const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
- __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs);
- __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
- const __m256 q = mul_sum_i8_pairs_float(qx, qy);
- // Multiply q with scale and accumulate
- #if defined(__AVX2__)
- acc = _mm256_fmadd_ps( d, q, acc );
- #else
- acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
- #endif
- }
- *s = hsum_float_8(acc);
- #elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
- size_t vl = __riscv_vsetvl_e8m1(qk);
- for (int i = 0; i < nb; i++) {
- // load elements
- vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
- vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
- vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
- vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
- vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
- int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
- sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
- }
- *s = sumf;
- #else
- // scalar
- float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- int sumi = 0;
- for (int j = 0; j < qk; j++) {
- sumi += x[i].qs[j]*y[i].qs[j];
- }
- sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
- }
- *s = sumf;
- #endif
- }
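- // Usage sketch (illustrative only; assumes the quantize_row_q8_0 helper defined
- // earlier in this file and n a multiple of QK8_0):
- //
- //   float a[4*QK8_0], b[4*QK8_0], dot;
- //   block_q8_0 qa[4], qb[4];
- //   quantize_row_q8_0(a, qa, 4*QK8_0);
- //   quantize_row_q8_0(b, qb, 4*QK8_0);
- //   ggml_vec_dot_q8_0_q8_0(4*QK8_0, &dot, 0, qa, 0, qb, 0, 1); // nrc == 1
- //
- // With nrc == 1 the bs/bbx/bby strides are unused (see the UNUSED() calls above).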
- #if QK_K == 256
- void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q2_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m3 = vdupq_n_u8(0x3);
- const uint8x16_t m4 = vdupq_n_u8(0xF);
- const int32x4_t vzero = vdupq_n_s32(0);
- ggml_int8x16x2_t q2bytes;
- uint8_t aux[16];
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint8_t * restrict sc = x[i].scales;
- const uint8x16_t mins_and_scales = vld1q_u8(sc);
- const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
- vst1q_u8(aux, scales);
- const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
- const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
- const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
- const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
- vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
- const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
- vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
- sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
- int isum = 0;
- int is = 0;
- // We use this macro instead of a function call because, for some reason,
- // the function-call version runs 2-3% slower even when the function is declared inline
- #define MULTIPLY_ACCUM_WITH_SCALE(index)\
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
- #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
- q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
- q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
- q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
- MULTIPLY_ACCUM_WITH_SCALE((index));
- for (int j = 0; j < QK_K/128; ++j) {
- const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
- ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
- q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
- q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
- MULTIPLY_ACCUM_WITH_SCALE(0);
- SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
- SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
- SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
- is += 8;
- }
- sum += d * isum;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- const __m128i m4 = _mm_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
- const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
- const __m256i mins = _mm256_cvtepi8_epi16(mins8);
- const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
- const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
- const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
- const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
- const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
- __m256i sumi = _mm256_setzero_si256();
- for (int j = 0; j < QK_K/128; ++j) {
- const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
- const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
- const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
- const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
- __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
- __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
- __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
- __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
- p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
- p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
- p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
- p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
- p0 = _mm256_add_epi32(p0, p1);
- p2 = _mm256_add_epi32(p2, p3);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
- }
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(0x3);
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m2 = _mm_set1_epi8(0x2);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- // load mins and scales from block_q2_K.scales[QK_K/16]
- const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
- const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
- const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
- const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
- // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
- const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
- const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
- // sumf += -dmin * summs in 32bits*8
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
- const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
- const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
- const __m128i scales[2] = { scales_0, scales_1 };
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- for (int j = 0; j < QK_K/128; ++j) {
- // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
- const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
- __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
- const __m128i q2_0 = _mm_and_si128(q2bits, m3);
- const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
- const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
- const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
- q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
- const __m128i q2_1 = _mm_and_si128(q2bits, m3);
- const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
- const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
- const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
- // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
- __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
- __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
- __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
- __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
- __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
- __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
- __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
- __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
- // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
- __m128i shuffle = _mm_set1_epi16(0x0100);
- p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
- shuffle = _mm_add_epi16(shuffle, m2);
- p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
- shuffle = _mm_add_epi16(shuffle, m2);
- p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
- shuffle = _mm_add_epi16(shuffle, m2);
- p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
- shuffle = _mm_add_epi16(shuffle, m2);
- p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
- shuffle = _mm_add_epi16(shuffle, m2);
- p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
- shuffle = _mm_add_epi16(shuffle, m2);
- p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
- shuffle = _mm_add_epi16(shuffle, m2);
- p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
- p0 = _mm_add_epi32(p0, p1);
- p2 = _mm_add_epi32(p2, p3);
- p4 = _mm_add_epi32(p4, p5);
- p6 = _mm_add_epi32(p6, p7);
- // isum in 32bits*4*2
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
- }
- // sumf += dall * isum - dmin * summs in 32bits
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __riscv_v_intrinsic
- float sumf = 0;
- uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
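- // (editor's note) gather indices: sixteen 0s then sixteen 1s, so that adding an
- // offset and vrgather-ing below splats two consecutive scale bytes across the
- // two 16-lane halves of a 32-lane vector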
- for (int i = 0; i < nb; ++i) {
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
- const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- size_t vl = 16;
- vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
- vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
- vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
- vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
- vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
- vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
- vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
- vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
- sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
- vl = 32;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
- uint8_t is=0;
- int isum=0;
- for (int j = 0; j < QK_K/128; ++j) {
- // load Q2
- vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
- vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
- vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
- vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
- vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
- // duplicate scale elements for product
- vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
- vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
- vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
- vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
- vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
- vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
- vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
- vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
- // load Q8
- vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
- vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
- vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
- vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
- vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
- vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
- vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
- vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
- vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
- vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
- isum += __riscv_vmv_x_s_i32m1_i32(isum1);
- q2+=32; q8+=128; is=8;
- }
- sumf += dall * isum;
- }
- *s = sumf;
- #else
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
- const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
- #endif
- }
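- // (editor's note, illustrative; not part of the original file) Every branch of
- // ggml_vec_dot_q2_K_q8_K above evaluates the same reference sum. With 16-wide
- // groups j inside a super-block, scale sc[j] & 0xF and min sc[j] >> 4, the dot
- // product factors as
- //
- //     sumf += dall * sum_j (sc[j] & 0xF) * sum_l q2[j][l]*q8[j][l]
- //           - dmin * sum_j (sc[j] >>  4) * bsums[j]
- //
- // where bsums[j] = sum_l q8[j][l] is precomputed in block_q8_K. This is why the
- // mins cost one madd per block instead of one multiply per element.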
- #else
- void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q2_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m3 = vdupq_n_u8(0x3);
- const int32x4_t vzero = vdupq_n_s32(0);
- ggml_int8x16x4_t q2bytes;
- uint32_t aux32[2];
- const uint8_t * scales = (const uint8_t *)aux32;
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * (float)x[i].d;
- const float dmin = -y[i].d * (float)x[i].dmin;
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- aux32[0] = sc[0] & 0x0f0f0f0f;
- aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
- sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
- int isum1 = 0, isum2 = 0;
- const uint8x16_t q2bits = vld1q_u8(q2);
- const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
- q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
- q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
- q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
- q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
- isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
- isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
- isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
- isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
- sum += d * (isum1 + isum2);
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- __m256 acc = _mm256_setzero_ps();
- uint32_t ud, um;
- const uint8_t * restrict db = (const uint8_t *)&ud;
- const uint8_t * restrict mb = (const uint8_t *)&um;
- float summs = 0;
- // TODO: optimize this
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- ud = (sc[0] >> 0) & 0x0f0f0f0f;
- um = (sc[0] >> 4) & 0x0f0f0f0f;
- int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
- summs += dmin * smin;
- const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
- const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
- const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
- const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
- const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
- const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
- const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
- const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(3);
- __m256 acc = _mm256_setzero_ps();
- uint32_t ud, um;
- const uint8_t * restrict db = (const uint8_t *)&ud;
- const uint8_t * restrict mb = (const uint8_t *)&um;
- float summs = 0;
- // TODO: optimize this
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- ud = (sc[0] >> 0) & 0x0f0f0f0f;
- um = (sc[0] >> 4) & 0x0f0f0f0f;
- int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
- summs += dmin * smin;
- const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
- const __m128i q2_0 = _mm_and_si128(q2bits, m3);
- const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
- const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
- const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
- const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
- const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
- const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
- const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
- const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
- const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
- const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined __riscv_v_intrinsic
- uint32_t aux32[2];
- const uint8_t * scales = (const uint8_t *)aux32;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * (float)x[i].d;
- const float dmin = -y[i].d * (float)x[i].dmin;
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- aux32[0] = sc[0] & 0x0f0f0f0f;
- aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
- sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
- int isum1 = 0;
- int isum2 = 0;
- size_t vl = 16;
- vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
- // load Q2
- vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
- vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
- vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
- vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
- vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
- // load Q8, and take product with Q2
- vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
- vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
- vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
- vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
- vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
- vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
- vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
- vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
- isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
- isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
- isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
- isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
- sumf += d * (isum1 + isum2);
- }
- *s = sumf;
- #else
- float sumf = 0;
- int isum[4];
- for (int i = 0; i < nb; ++i) {
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
- int summs = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
- const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- isum[0] = isum[1] = isum[2] = isum[3] = 0;
- for (int l = 0; l < 16; ++l) {
- isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
- isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
- isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
- isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
- }
- for (int l = 0; l < 4; ++l) {
- isum[l] *= (sc[l] & 0xF);
- }
- sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
- }
- *s = sumf;
- #endif
- }
- #endif
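- // (editor's sketch, not part of the original file) Scalar equivalent of the
- // 2-bit unpacking used throughout the SIMD paths above: each byte of qs packs
- // four quants at bit offsets 0/2/4/6, so one 16-byte row expands into four
- // 16-quant rows; the vector code does the same shift-and-mask on 16 or 32
- // bytes at once. The helper name is hypothetical; assumes <stdint.h>.
- static inline void unpack_q2_row(const uint8_t * restrict q2, uint8_t * restrict out) {
-     for (int l = 0; l < 16; ++l) {
-         out[l +  0] = (q2[l] >> 0) & 3; // same masks as _mm_and_si128(..., m3)
-         out[l + 16] = (q2[l] >> 2) & 3;
-         out[l + 32] = (q2[l] >> 4) & 3;
-         out[l + 48] = (q2[l] >> 6) & 3;
-     }
- }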
- #if QK_K == 256
- void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const uint32_t kmask1 = 0x03030303;
- const uint32_t kmask2 = 0x0f0f0f0f;
- const block_q3_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- uint32_t aux[3];
- uint32_t utmp[4];
- const uint8x16_t m3b = vdupq_n_u8(0x3);
- const int32x4_t vzero = vdupq_n_s32(0);
- const uint8x16_t m0 = vdupq_n_u8(1);
- const uint8x16_t m1 = vshlq_n_u8(m0, 1);
- const uint8x16_t m2 = vshlq_n_u8(m0, 2);
- const uint8x16_t m3 = vshlq_n_u8(m0, 3);
- const int8_t m32 = 32;
- ggml_int8x16x4_t q3bytes;
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict qh = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
- ggml_uint8x16x4_t q3h;
- int32_t isum = 0;
- // Set up scales
- memcpy(aux, x[i].scales, 12);
- utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
- utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
- utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
- utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
- int8_t * scale = (int8_t *)utmp;
- for (int j = 0; j < 16; ++j) scale[j] -= m32;
- for (int j = 0; j < QK_K/128; ++j) {
- const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
- const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
- const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
- q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
- q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
- q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
- q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
- q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
- q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
- q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
- q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
- scale += 4;
- q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
- q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
- q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
- q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
- q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
- q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
- q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
- q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
- scale += 4;
- if (j == 0) {
- qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
- qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
- }
- }
- sum += d * isum;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- const __m256i mone = _mm256_set1_epi8(1);
- const __m128i m32 = _mm_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- uint32_t aux[3];
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- // Set up scales
- memcpy(aux, x[i].scales, 12);
- __m128i scales128 = _mm_set_epi32(
- ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
- ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
- (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
- (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
- scales128 = _mm_sub_epi8(scales128, m32);
- const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
- const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
- const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
- const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
- // high bit
- const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
- // integer accumulator
- __m256i sumi = _mm256_setzero_si256();
- int bit = 0;
- int is = 0;
- for (int j = 0; j < QK_K/128; ++j) {
- // load low 2 bits
- const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
- // prepare low and high bits
- const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
- const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
- const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
- const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
- const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- // load Q8 quants
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- // Dot product: we multiply the low-2-bit part and the high-bit part by q8 separately, so we can use _mm256_maddubs_epi16,
- // and then subtract. The high-bit part already carries the 4 that must be subtracted (so it is 4 if the high bit was
- // not set, and 0 if it was set).
- __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
- __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
- __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
- __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
- __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
- __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
- __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
- // multiply with scales
- p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
- p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
- p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
- p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
- // accumulate
- p16_0 = _mm256_add_epi32(p16_0, p16_1);
- p16_2 = _mm256_add_epi32(p16_2, p16_3);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
- }
- // multiply with block scale and accumulate
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(3);
- const __m128i mone = _mm_set1_epi8(1);
- const __m128i m32 = _mm_set1_epi8(32);
- const __m128i m2 = _mm_set1_epi8(2);
- __m256 acc = _mm256_setzero_ps();
- uint32_t aux[3];
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- // Set up scales
- memcpy(aux, x[i].scales, 12);
- __m128i scales128 = _mm_set_epi32(
- ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
- ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
- (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
- (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
- scales128 = _mm_sub_epi8(scales128, m32);
- const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
- const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
- const __m128i scales[2] = { scales_0, scales_1 };
- // high bit *128*2 from block_q3_K.hmask[QK_K/8]
- const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
- const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
- // integer accumulator
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- for (int j = 0; j < QK_K/128; ++j) {
- // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
- const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
- const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
- // prepare low and high bits
- const int bit = j << 2;
- const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
- const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
- const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
- const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
- const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
- const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
- const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
- const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
- const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
- const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
- const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
- const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
- const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
- const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
- const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
- const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
- // load Q8 quants from block_q8_K.qs[QK_K]
- const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- // Dot product: we multiply the low-2-bit part and the high-bit part by q8 separately, so we can use _mm_maddubs_epi16,
- // and then subtract. The high-bit part already carries the 4 that must be subtracted (so it is 4 if the high bit was
- // not set, and 0 if it was set).
- __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
- __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
- __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
- __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
- __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
- __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
- __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
- __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
- __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
- __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
- __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
- __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
- __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
- __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
- __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
- __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- p16_4 = _mm_sub_epi16(p16_4, q8s_4);
- p16_5 = _mm_sub_epi16(p16_5, q8s_5);
- p16_6 = _mm_sub_epi16(p16_6, q8s_6);
- p16_7 = _mm_sub_epi16(p16_7, q8s_7);
- // multiply with scales
- __m128i shuffle = _mm_set1_epi16(0x0100);
- p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
- // accumulate
- p16_0 = _mm_add_epi32(p16_0, p16_1);
- p16_2 = _mm_add_epi32(p16_2, p16_3);
- p16_4 = _mm_add_epi32(p16_4, p16_5);
- p16_6 = _mm_add_epi32(p16_6, p16_7);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
- }
- // multiply with block scale and accumulate
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __riscv_v_intrinsic
- uint32_t aux[3];
- uint32_t utmp[4];
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict qh = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- memcpy(aux, x[i].scales, 12);
- utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
- utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
- utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
- utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
- int8_t * scale = (int8_t *)utmp;
- for (int j = 0; j < 16; ++j) scale[j] -= 32;
- size_t vl = 32;
- uint8_t m = 1;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
- int sum_t = 0;
- for (int j = 0; j < QK_K; j += 128) {
- vl = 32;
- // load Q3
- vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
- vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
- vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
- vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
- vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
- // compute mask for subtraction
- vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
- vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
- vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
- m <<= 1;
- vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
- vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
- vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
- m <<= 1;
- vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
- vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
- vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
- m <<= 1;
- vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
- vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
- vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
- m <<= 1;
- // load Q8 and take product with Q3
- vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
- vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
- vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
- vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
- vl = 16;
- // retrieve lane to multiply with scale
- vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
- vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
- vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
- vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
- vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
- vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
- vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
- vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
- vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
- vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
- vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
- vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
- sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
- q3 += 32; q8 += 128; scale += 8;
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- sumf += d*sum_t;
- }
- *s = sumf;
- #else
- // scalar version
- // This function is written this way so that the compiler can vectorize most of it.
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
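- // (editor's sketch, not part of the original file) Per-index form of the q3_K
- // scale unpacking done above with kmask1/kmask2 and utmp: 16 six-bit scales
- // live in 12 bytes, low 4 bits in the nibbles of scales[0..7] and high 2 bits
- // packed four-per-byte in scales[8..11], biased by 32. Helper name is
- // hypothetical.
- static inline int8_t q3k_scale(const uint8_t * restrict sc, int j) {
-     const int lo = j < 8 ? (sc[j] & 0xF) : (sc[j - 8] >> 4);
-     const int hi = (sc[8 + (j & 3)] >> (2 * (j >> 2))) & 3;
-     return (int8_t)(((hi << 4) | lo) - 32);
- }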
- #else
- void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q3_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const int32x4_t vzero = vdupq_n_s32(0);
- const uint8x16_t m3b = vdupq_n_u8(0x3);
- const uint8x16_t mh = vdupq_n_u8(4);
- ggml_int8x16x4_t q3bytes;
- uint16_t aux16[2];
- int8_t * scales = (int8_t *)aux16;
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- ggml_uint8x16x4_t q3h;
- const uint8x8_t hbits = vld1_u8(x[i].hmask);
- const uint8x16_t q3bits = vld1q_u8(x[i].qs);
- const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- for (int j = 0; j < 4; ++j) scales[j] -= 8;
- int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
- const float d = y[i].d * (float)x[i].d;
- const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
- q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
- q3h.val[1] = vandq_u8(mh, htmp);
- q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
- q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
- q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
- q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
- q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
- q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
- sum += d * isum;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- const __m256i m1 = _mm256_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- uint64_t aux64;
- uint16_t aux16[2];
- const int8_t * aux8 = (const int8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
- const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
- memcpy(&aux64, x[i].hmask, 8);
- const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
- __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
- __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
- q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
- q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
- // load low 2 bits
- const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
- // prepare low and high bits
- const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
- const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
- const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
- // load Q8 quants
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- // Dot product: we multiply the low-2-bit part and the high-bit part by q8 separately, so we can use _mm256_maddubs_epi16,
- // and then subtract. The high-bit part already carries the 4 that must be subtracted (so it is 4 if the high bit was
- // not set, and 0 if it was set).
- const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
- const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
- __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- // multiply with scales
- p16_0 = _mm256_madd_epi16(scale_0, p16_0);
- p16_1 = _mm256_madd_epi16(scale_1, p16_1);
- p16_0 = _mm256_add_epi32(p16_0, p16_1);
- // multiply with block scale and accumulate
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(3);
- const __m128i m1 = _mm_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- uint64_t aux64;
- uint16_t aux16[2];
- const int8_t * aux8 = (const int8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
- const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
- const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
- const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
- memcpy(&aux64, x[i].hmask, 8);
- __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
- __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
- __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
- __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
- q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
- q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
- q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
- q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
- // load low 2 bits
- const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
- // prepare low and high bits
- const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
- const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
- const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
- const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
- // load Q8 quants
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- // Dot product: we multiply the low-2-bit part and the high-bit part by q8 separately, so we can use _mm_maddubs_epi16,
- // and then subtract. The high-bit part already carries the 4 that must be subtracted (so it is 4 if the high bit was
- // not set, and 0 if it was set).
- const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
- const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
- const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
- const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
- __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
- __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
- __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
- __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- // multiply with scales
- p16_0 = _mm_madd_epi16(scale_0, p16_0);
- p16_1 = _mm_madd_epi16(scale_1, p16_1);
- p16_2 = _mm_madd_epi16(scale_2, p16_2);
- p16_3 = _mm_madd_epi16(scale_3, p16_3);
- p16_0 = _mm_add_epi32(p16_0, p16_2);
- p16_1 = _mm_add_epi32(p16_1, p16_3);
- __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
- // multiply with block scale and accumulate
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __riscv_v_intrinsic
- uint16_t aux16[2];
- int8_t * scales = (int8_t *)aux16;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- for (int j = 0; j < 4; ++j) scales[j] -= 8;
- int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
- const float d = y[i].d * (float)x[i].d;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- // load qh
- vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
- vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
- size_t vl = 16;
- // extend and combine both qh_x1 and qh_x2
- vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
- vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
- vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
- vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
- vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
- // load Q3
- vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
- vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
- vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
- vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
- vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
- vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
- vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
- vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
- vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
- // load Q8 and take product with Q3
- vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
- vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
- vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
- vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
- vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
- vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
- vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
- vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
- isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
- isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
- isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
- isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
- sumf += d * isum;
- }
- *s = sumf;
- #else
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- int32_t scales[4];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- int8_t * restrict a = aux8;
- for (int l = 0; l < 8; ++l) {
- a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
- a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
- a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
- a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
- a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
- a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
- a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
- a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
- }
- scales[0] = (x[i].scales[0] & 0xF) - 8;
- scales[1] = (x[i].scales[0] >> 4) - 8;
- scales[2] = (x[i].scales[1] & 0xF) - 8;
- scales[3] = (x[i].scales[1] >> 4) - 8;
- memset(aux32, 0, 8*sizeof(int32_t));
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
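- // (editor's sketch, not part of the original file) Branchless form of the q3_K
- // high-bit correction used by the SIMD paths above: a cleared hmask bit means
- // "subtract 4", which andnot + shift turns into plain arithmetic. Helper name
- // is hypothetical.
- static inline int8_t q3k_value(uint8_t lo2, uint8_t hm, int bit) {
-     // equals lo2 - ((hm & (1 << bit)) ? 0 : 4), as in the scalar fallbacks
-     return (int8_t)(lo2 - (((~hm >> bit) & 1) << 2));
- }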
- #if QK_K == 256
- void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q4_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- static const uint32_t kmask1 = 0x3f3f3f3f;
- static const uint32_t kmask2 = 0x0f0f0f0f;
- static const uint32_t kmask3 = 0x03030303;
- uint32_t utmp[4];
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- const int32x4_t mzero = vdupq_n_s32(0);
- ggml_int8x16x2_t q4bytes;
- ggml_int8x16x2_t q8bytes;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
- memcpy(utmp, x[i].scales, 12);
- uint32x2_t mins8 = { 0 };
- mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
- mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[0] &= kmask1;
- const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
- const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
- vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
- sumf -= dmin * vaddvq_s32(prod);
- const uint8_t * scales = (const uint8_t *)utmp;
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- int32_t sumi1 = 0;
- int32_t sumi2 = 0;
- for (int j = 0; j < QK_K/64; ++j) {
- const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
- q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
- q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
- q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
- const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
- sumi1 += vaddvq_s32(p1) * scales[2*j+0];
- q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
- q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
- q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
- const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
- sumi2 += vaddvq_s32(p2) * scales[2*j+1];
- }
- sumf += d * (sumi1 + sumi2);
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- __m128 acc_m = _mm_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
- const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
- const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
- acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
- const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
- const __m256i scales = MM256_SET_M128I(sc128, sc128);
- __m256i sumi = _mm256_setzero_si256();
- for (int j = 0; j < QK_K/64; ++j) {
- const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
- const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
- const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
- const __m256i q4l = _mm256_and_si256(q4bits, m4);
- const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
- const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
- p16l = _mm256_madd_epi16(scale_l, p16l);
- const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
- p16h = _mm256_madd_epi16(scale_h, p16h);
- const __m256i sumj = _mm256_add_epi32(p16l, p16h);
- sumi = _mm256_add_epi32(sumi, sumj);
- }
- __m256 vd = _mm256_set1_ps(d);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
- }
- acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
- acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
- *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m2 = _mm_set1_epi8(0x2);
- __m256 acc = _mm256_setzero_ps();
- __m128 acc_m = _mm_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
- const __m128i scales = _mm_cvtepu8_epi16(utmps);
- const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
- const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
- const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
- const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
- const __m128i prod = _mm_madd_epi16(mins, q8s);
- acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- __m128i shuffle = _mm_set1_epi16(0x0100);
- for (int j = 0; j < QK_K/64; ++j) {
- const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
- const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
- q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
- const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
- const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
- p16l = _mm_madd_epi16(scale_l, p16l);
- sumi_0 = _mm_add_epi32(sumi_0, p16l);
- const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
- p16l = _mm_madd_epi16(scale_l, p16l);
- sumi_1 = _mm_add_epi32(sumi_1, p16l);
- const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
- p16h = _mm_madd_epi16(scale_h, p16h);
- sumi_0 = _mm_add_epi32(sumi_0, p16h);
- const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
- p16h = _mm_madd_epi16(scale_h, p16h);
- sumi_1 = _mm_add_epi32(sumi_1, p16h);
- }
- __m256 vd = _mm256_set1_ps(d);
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
- }
- acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
- acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
- *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
- #elif defined __riscv_v_intrinsic
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- size_t vl = 8;
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
- vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
- vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
- vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
- vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
- vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
- sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- vl = 32;
- int32_t sum_1 = 0;
- int32_t sum_2 = 0;
- vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
- for (int j = 0; j < QK_K/64; ++j) {
- // load Q4
- vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
- // load Q8 and multiply it with lower Q4 nibble
- vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
- vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
- vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
- vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
- sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
- // load Q8 and multiply it by the upper Q4 nibbles
- vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
- vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
- vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
- vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
- sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
- q4 += 32; q8 += 64;
- }
- sumf += d*(sum_1 + sum_2);
- }
- *s = sumf;
- #else
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
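- // expand the 4-bit quants to bytes: for each group of 64, the 32 low nibbles first, then the 32 high nibbles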
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
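- // QK_K == 64 variant: a simpler q4_K layout with two fp16 super-scales in x[i].d[] and four 4-bit scales/mins packed into two bytes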
- void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q4_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- const int32x4_t mzero = vdupq_n_s32(0);
- float sumf = 0;
- ggml_int8x16x2_t q4bytes;
- ggml_int8x16x4_t q8bytes;
- float sum_mins = 0.f;
- uint16_t aux16[2];
- const uint8_t * restrict scales = (const uint8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t * restrict a = (const uint16_t *)x[i].scales;
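- // low nibbles of the two packed bytes -> scales[0..1], high nibbles -> mins in scales[2..3]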
- aux16[0] = a[0] & 0x0f0f;
- aux16[1] = (a[0] >> 4) & 0x0f0f;
- const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
- sum_mins += y[i].d * (float)x[i].d[1] * summi;
- const float d = y[i].d * (float)x[i].d[0];
- const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);
- q8bytes = ggml_vld1q_s8_x4(q8);
- q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
- q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
- const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
- const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
- q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
- q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
- const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
- const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
- sumf += d * (sumi1 + sumi2);
- }
- *s = sumf - sum_mins;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0;
- uint16_t aux16[2];
- const uint8_t * scales = (const uint8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
- const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
- const __m256 vd = _mm256_set1_ps(d);
- const uint16_t * a = (const uint16_t *)x[i].scales;
- aux16[0] = a[0] & 0x0f0f;
- aux16[1] = (a[0] >> 4) & 0x0f0f;
- summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
- const __m256i q4l = _mm256_and_si256(q4bits, m4);
- const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
- const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
- const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
- const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
- const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
- }
- *s = hsum_float_8(acc) - summs;
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0;
- uint16_t aux16[2];
- const uint8_t * scales = (const uint8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
- const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
- const __m256 vd = _mm256_set1_ps(d);
- const uint16_t * a = (const uint16_t *)x[i].scales;
- aux16[0] = a[0] & 0x0f0f;
- aux16[1] = (a[0] >> 4) & 0x0f0f;
- summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
- const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
- const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
- const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
- const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
- const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
- const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
- const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
- const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
- const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
- const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
- const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
- const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
- const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
- }
- *s = hsum_float_8(acc) - summs;
- #elif defined __riscv_v_intrinsic
- uint16_t s16[2];
- const uint8_t * restrict scales = (const uint8_t *)s16;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t * restrict b = (const uint16_t *)x[i].scales;
- s16[0] = b[0] & 0x0f0f;
- s16[1] = (b[0] >> 4) & 0x0f0f;
- sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
- size_t vl = 32;
- vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
- // load Q4
- vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
- // load Q8 and multiply it by the lower Q4 nibbles
- vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
- vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
- vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);
- sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);
- // load Q8 and multiply it by the upper Q4 nibbles
- vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
- vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
- vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);
- sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
- }
- *s = sumf;
- #else
- uint8_t aux8[QK_K];
- int16_t aux16[16];
- float sums [8];
- memset(sums, 0, 8*sizeof(float));
- uint16_t s16[2];
- const uint8_t * restrict scales = (const uint8_t *)s16;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- uint8_t * restrict a = aux8;
- for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
- for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
- const uint16_t * restrict b = (const uint16_t *)x[i].scales;
- s16[0] = b[0] & 0x0f0f;
- s16[1] = (b[0] >> 4) & 0x0f0f;
- sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
- for (int j = 0; j < QK_K/32; ++j) {
- for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
- q8 += 16; a += 16;
- for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
- q8 += 16; a += 16;
- const float dl = d * scales[j];
- for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
- }
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
- #if QK_K == 256
- void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q5_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- static const uint32_t kmask1 = 0x3f3f3f3f;
- static const uint32_t kmask2 = 0x0f0f0f0f;
- static const uint32_t kmask3 = 0x03030303;
- uint32_t utmp[4];
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- const uint8x16_t mone = vdupq_n_u8(1);
- const uint8x16_t mtwo = vdupq_n_u8(2);
- const int32x4_t mzero = vdupq_n_s32(0);
- ggml_int8x16x4_t q5bytes;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
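- // unpack the 12 packed bytes of 6-bit scales/mins: utmp[0..1] end up holding the eight scales, utmp[2..3] the eight mins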
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
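- // dot the per-16 q8 sums against the mins; the result is subtracted once per block as dmin * sumi_mins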
- const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
- const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
- const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
- vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
- int32_t sumi_mins = vaddvq_s32(prod);
- const uint8_t * scales = (const uint8_t *)utmp;
- const uint8_t * restrict q5 = x[i].qs;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
- ggml_uint8x16x4_t q5h;
- int32_t sumi = 0;
- for (int j = 0; j < QK_K/64; ++j) {
- const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
- const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
- q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
- q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
- q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
- q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
- qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
- qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
- q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
- q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
- q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
- q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
- sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
- sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
- }
- sumf += d * sumi - dmin * sumi_mins;
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m128i mzero = _mm_setzero_si128();
- const __m256i mone = _mm256_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0.f;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
- const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
- const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
- const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
- summs += dmin * _mm_extract_epi32(hsum, 0);
- const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
- const __m256i scales = MM256_SET_M128I(sc128, sc128);
- const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
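- // the 5th bit of every quant lives in hbits; hmask/bit select two bit planes per loop iteration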
- __m256i hmask = mone;
- __m256i sumi = _mm256_setzero_si256();
- int bit = 0;
- for (int j = 0; j < QK_K/64; ++j) {
- const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
- const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
- const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
- const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
- const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
- const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
- hmask = _mm256_slli_epi16(hmask, 1);
- const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
- const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
- const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
- hmask = _mm256_slli_epi16(hmask, 1);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
- p16_0 = _mm256_madd_epi16(scale_0, p16_0);
- p16_1 = _mm256_madd_epi16(scale_1, p16_1);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
- }
- __m256 vd = _mm256_set1_ps(d);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i mzero = _mm_setzero_si128();
- const __m128i mone = _mm_set1_epi8(1);
- const __m128i m2 = _mm_set1_epi8(2);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
- const __m128i scales = _mm_cvtepu8_epi16(utmps);
- const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
- const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
- const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
- const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
- const __m128i prod = _mm_madd_epi16(mins, q8s);
- const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
- summs += dmin * _mm_extract_epi32(hsum, 0);
- const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
- const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
- __m128i hmask = mone;
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- int bit = 0;
- __m128i shuffle = _mm_set1_epi16(0x0100);
- for (int j = 0; j < QK_K/64; ++j) {
- const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
- const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
- __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
- __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
- __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
- __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
- __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
- __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
- hmask = _mm_slli_epi16(hmask, 1);
- __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
- __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
- p16_0 = _mm_madd_epi16(scale_0, p16_0);
- p16_1 = _mm_madd_epi16(scale_0, p16_1);
- q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
- q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
- q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
- q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
- q5_0 = _mm_add_epi8(q5l_0, q5h_0);
- q5_1 = _mm_add_epi8(q5l_1, q5h_1);
- hmask = _mm_slli_epi16(hmask, 1);
- q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
- __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
- p16_2 = _mm_madd_epi16(scale_1, p16_2);
- p16_3 = _mm_madd_epi16(scale_1, p16_3);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
- }
- __m256 vd = _mm256_set1_ps(d);
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined __riscv_v_intrinsic
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
- float sumf = 0;
- float sums = 0.f;
- size_t vl;
- for (int i = 0; i < nb; ++i) {
- vl = 8;
- const uint8_t * restrict q5 = x[i].qs;
- const uint8_t * restrict hm = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
- vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
- vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
- vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
- vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
- vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
- vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
- sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
- vl = 32;
- int32_t aux32 = 0;
- int is = 0;
- uint8_t m = 1;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);
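- // rather than OR-ing the 5th bit into the nibble, add 16 under a mask derived from qh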
- for (int j = 0; j < QK_K/64; ++j) {
- // load Q5 and Q8
- vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
- vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
- vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);
- // compute mask for addition
- vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
- vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
- vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
- vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
- m <<= 1;
- vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
- vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
- vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
- vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
- m <<= 1;
- vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
- vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);
- vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
- vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);
- vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
- vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);
- aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
- q5 += 32; q8 += 64;
- }
- vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
- sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
- }
- *s = sumf+sums;
- #else
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const uint8_t * restrict hm = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
- void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q5_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- const uint8x16_t mh = vdupq_n_u8(16);
- const int32x4_t mzero = vdupq_n_s32(0);
- ggml_int8x16x4_t q5bytes;
- ggml_uint8x16x4_t q5h;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * (float)x[i].d;
- const int8_t * sc = x[i].scales;
- const uint8_t * restrict q5 = x[i].qs;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const uint8x8_t qhbits = vld1_u8(qh);
- const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
- const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
- const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
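- // vbic(16, shifted h) is 16 where the high bit is clear and 0 where it is set; subtracting it maps each quant into [-16, 15]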
- q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
- q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
- q5h.val[2] = vbicq_u8(mh, htmp);
- q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
- q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
- q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
- q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
- q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
- int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
- int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
- int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
- int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
- sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i mone = _mm256_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
- const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
- const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
- int64_t aux64;
- memcpy(&aux64, x[i].qh, 8);
- const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
- const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
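- // andnot(haux, 1) << 4 is 16 where the high bit is clear; the s16 products below subtract it, matching the NEON mapping above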
- const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
- const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
- const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
- const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
- const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
- const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
- const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
- const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i mone = _mm_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
- const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
- const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
- const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
- const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
- int64_t aux64;
- memcpy(&aux64, x[i].qh, 8);
- const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
- const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
- const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
- const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
- const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
- const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
- const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
- const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
- const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
- const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
- const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
- const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
- const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
- const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
- const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
- const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
- const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
- const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
- const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __riscv_v_intrinsic
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * (float)x[i].d;
- const int8_t * sc = x[i].scales;
- const uint8_t * restrict q5 = x[i].qs;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- // load qh
- vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
- vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
- size_t vl = 16;
- // combine qh_x1 and the shifted qh_x2 into one 16-byte vector
- vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
- vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
- vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
- vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
- vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
- vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
- vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
- vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
- vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);
- // load q5
- vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
- vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);
- vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
- vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
- vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
- vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));
- vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
- vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
- vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
- vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);
- // load Q8 and multiply it by Q5
- vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
- vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
- vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
- vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
- vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
- vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
- vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
- vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
- int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
- int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
- int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
- int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);
- sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
- }
- *s = sumf;
- #else
- int8_t aux8[QK_K];
- int16_t aux16[16];
- float sums [8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const uint8_t * restrict hm = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- int8_t * restrict a = aux8;
- for (int l = 0; l < 32; ++l) {
- a[l+ 0] = q4[l] & 0xF;
- a[l+32] = q4[l] >> 4;
- }
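- // subtract 16 wherever the high bit is clear, giving the same signed [-16, 15] quants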
- for (int is = 0; is < 8; ++is) {
- uint8_t m = 1 << is;
- for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
- }
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const int8_t * restrict sc = x[i].scales;
- for (int j = 0; j < QK_K/16; ++j) {
- const float dl = d * sc[j];
- for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
- q8 += 16; a += 16;
- }
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
- #if QK_K == 256
- void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q6_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- float sum = 0;
- const uint8x16_t m4b = vdupq_n_u8(0xF);
- const int32x4_t vzero = vdupq_n_s32(0);
- //const int8x16_t m32s = vdupq_n_s8(32);
- const uint8x16_t mone = vdupq_n_u8(3);
- ggml_int8x16x4_t q6bytes;
- ggml_uint8x16x4_t q6h;
- for (int i = 0; i < nb; ++i) {
- const float d_all = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q6 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const int8_t * restrict scale = x[i].scales;
- const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
- const int8x16_t scales = vld1q_s8(scale);
- const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
- const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
- vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
- vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
- vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
- int32_t isum_mins = vaddvq_s32(prod);
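- // the constant -32 offset of q6_K (see the commented-out lines below) is folded into isum_mins and removed once per block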
- int32_t isum = 0;
- for (int j = 0; j < QK_K/128; ++j) {
- ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
- ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
- ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
- q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
- q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
- uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
- q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[1], 2);
- q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
- //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
- //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
- //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
- q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
- q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
- q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
- q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
- scale += 4;
- q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
- shifted = vshrq_n_u8(qhbits.val[0], 4);
- q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[1], 4);
- q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[0], 6);
- q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[1], 6);
- q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
- //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
- //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
- //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
- q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
- q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
- q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
- q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
- scale += 4;
- }
- //sum += isum * d_all * y[i].d;
- sum += d_all * y[i].d * (isum - 32 * isum_mins);
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i m2 = _mm256_set1_epi8(3);
- const __m256i m32s = _mm256_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- __m256i sumi = _mm256_setzero_si256();
- int is = 0;
- for (int j = 0; j < QK_K/128; ++j) {
- const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
- const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
- const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
- const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
- is += 4;
- const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
- const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
- const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
- const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
- const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
- const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
- const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
- const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
- const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
- const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
- const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
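- // maddubs(32, q8) yields 32 * (pairwise q8 sums); subtracting it applies the implicit -32 offset of q6_K while staying in 16 bits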
- __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
- __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
- __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
- __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
- __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
- __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
- __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
- p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
- p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
- p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
- }
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m3 = _mm_set1_epi8(3);
- const __m128i m32s = _mm_set1_epi8(32);
- const __m128i m2 = _mm_set1_epi8(2);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
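- // shuffle replicates scale byte 0 across the low half and byte 1 across the high half; m2 steps to the next byte pair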
- for (int j = 0; j < QK_K/128; ++j) {
- const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
- const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
- const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
- const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
- const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
- const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
- const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
- const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
- const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
- const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
- const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
- const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
- const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
- const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
- const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
- const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
- const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
- const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
- const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
- __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
- __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
- __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
- __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
- __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
- __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
- __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
- __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
- __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
- __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
- __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
- __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
- __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
- __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
- __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- p16_4 = _mm_sub_epi16(p16_4, q8s_4);
- p16_5 = _mm_sub_epi16(p16_5, q8s_5);
- p16_6 = _mm_sub_epi16(p16_6, q8s_6);
- p16_7 = _mm_sub_epi16(p16_7, q8s_7);
- const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
- p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
- p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
- p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
- p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
- p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
- p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
- }
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __riscv_v_intrinsic
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * restrict q6 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const int8_t * restrict scale = x[i].scales;
- size_t vl;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- int sum_t = 0;
- int is = 0;
- for (int j = 0; j < QK_K/128; ++j) {
- vl = 32;
- // load qh
- vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);
- // load Q6
- vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
- vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);
- vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
- vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
- vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
- vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);
- vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
- vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
- vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
- vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);
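- // fold the 2-bit highs into bits 4..5 of each quant, then recentre by subtracting 32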
- vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
- vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
- vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
- vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);
- vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
- vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
- vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
- vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);
- // load Q8 and multiply it by the decoded Q6
- vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
- vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
- vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
- vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
- vl = 16;
- vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
- vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
- vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
- vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
- vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
- vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
- vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
- vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);
- vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
- vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
- vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
- vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);
- sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
- q6 += 64; qh += 32; q8 += 128; is=8;
- }
- sumf += d * sum_t;
- }
- *s = sumf;
- #else
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
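- // decode 128 quants per pass: low nibble | (2 high bits << 4), minus the 32 offset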
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
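- // QK_K == 64 variant: a single 64-quant q6_K block with 32 bytes of low nibbles, 16 bytes of 2-bit highs, and four int8 scales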
- void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_q6_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- float sum = 0;
- const uint8x16_t m4b = vdupq_n_u8(0xF);
- const int8x16_t m32s = vdupq_n_s8(32);
- const int32x4_t vzero = vdupq_n_s32(0);
- const uint8x16_t mone = vdupq_n_u8(3);
- ggml_int8x16x4_t q6bytes;
- ggml_uint8x16x4_t q6h;
- for (int i = 0; i < nb; ++i) {
- const float d_all = (float)x[i].d;
- const uint8_t * restrict q6 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const int8_t * restrict scale = x[i].scales;
- int32_t isum = 0;
- uint8x16_t qhbits = vld1q_u8(qh);
- ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
- ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
- q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
- uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
- q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits, 4);
- q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits, 6);
- q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
- q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
- q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
- q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
- isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
- vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
- sum += isum * d_all * y[i].d;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i m2 = _mm256_set1_epi8(3);
- const __m256i m32s = _mm256_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
- const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
- const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
- const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
- __m256i sumi = _mm256_setzero_si256();
- const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
- const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
- const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
- const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
- const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
- const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
- const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
- const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
- __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
- __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m2 = _mm_set1_epi8(3);
- const __m128i m32s = _mm_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
- const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
- const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
- const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
- const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
- const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
- const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
- const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
- const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
- const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
- const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
- const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
- const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
- const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
- const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
- __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
- __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
- __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
- __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
- __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
- __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
- __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
- p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
- p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __riscv_v_intrinsic
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d_all = (float)x[i].d;
- const uint8_t * restrict q6 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const int8_t * restrict scale = x[i].scales;
- int32_t isum = 0;
- size_t vl = 16;
- vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
- // load Q6
- vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
- vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);
- // load qh
- vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);
- vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
- qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
- vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
- qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
- vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
- qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
- vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
- vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
- vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
- vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
- vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);
- vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
- vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
- vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
- vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);
- // load Q8 and take product
- vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
- vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
- vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
- vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
- vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
- vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
- vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
- vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
- isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
- isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
- isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
- isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];
- sumf += isum * d_all * y[i].d;
- }
- *s = sumf;
- #else
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- for (int l = 0; l < 16; ++l) {
- a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
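- // Sign table shared by the IQ2/IQ3 "XXS/XS" formats: 128 rows of 8 int8 signs (+1/-1).
- // Row j encodes sign bits j0..j6 directly; the 8th sign is chosen so that the total
- // number of -1 entries in the row is even ("k even signs"), which is why only 7 bits
- // per group of 8 values need to be stored.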
- static const int8_t keven_signs_q2xs[1024] = {
- 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
- 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
- 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
- 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
- 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
- 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
- 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
- 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
- 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
- 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
- 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
- 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
- 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
- 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
- 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
- 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
- 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
- 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
- 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
- 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
- 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
- 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
- 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
- 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
- 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
- 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
- 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
- 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
- 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
- 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
- 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
- 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
- };
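- // IQ2_XXS dot product. Per 32 weights the block stores four uint16 values, read below
- // as two uint32 words: the low word holds 4 byte-indices into iq2xxs_grid (8 packed
- // quants each), the high word holds 4 x 7-bit indices into the even-sign table plus a
- // 4-bit block scale s in bits 28..31, applied as 2*s + 1.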
- void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_iq2_xxs * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #if defined(__ARM_NEON)
- const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
- uint32_t aux32[4];
- const uint8_t * aux8 = (const uint8_t *)aux32;
- ggml_int8x16x4_t q2u;
- ggml_int8x16x4_t q2s;
- ggml_int8x16x4_t q8b;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- float sumf1 = 0, sumf2 = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
- memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
- q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
- q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
- q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
- q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
- q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
- q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
- q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
- q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
- q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
- q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
- q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
- q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
- const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
- const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
- sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
- sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
- }
- sumf += d*(sumf1 + sumf2);
- }
- *s = 0.25f * sumf;
- #elif defined(__AVX2__)
- const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
- uint32_t aux32[4];
- const uint8_t * aux8 = (const uint8_t *)aux32;
- __m256 accumf = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- __m256i sumi1 = _mm256_setzero_si256();
- __m256i sumi2 = _mm256_setzero_si256();
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
- const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
- const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
- const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
- signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
- const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
- signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
- const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
- const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
- const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
- const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
- const uint16_t ls1 = aux32[1] >> 28;
- const uint16_t ls2 = aux32[3] >> 28;
- const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
- const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
- sumi1 = _mm256_add_epi32(sumi1, p1);
- sumi2 = _mm256_add_epi32(sumi2, p2);
- }
- accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
- }
- *s = 0.125f * hsum_float_8(accumf);
- #else
- uint32_t aux32[2];
- const uint8_t * aux8 = (const uint8_t *)aux32;
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(aux32, q2, 2*sizeof(uint32_t));
- q2 += 4;
- const uint32_t ls = 2*(aux32[1] >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
- #endif
- }
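- // IQ2_XS dot product. Every uint16 in x[i].qs splits into a 9-bit index into the
- // 512-entry iq2xs_grid (low bits) and a 7-bit sign-pattern index (high bits). Each
- // byte of x[i].scales covers 32 weights: the low nibble scales the first 16, the high
- // nibble the second 16, applied as 2*s + 1.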
- void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_iq2_xs * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #if defined(__ARM_NEON)
- const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
- ggml_int8x16x4_t q2u;
- ggml_int8x16x4_t q2s;
- ggml_int8x16x4_t q8b;
- int32x4x4_t scales32;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint8x8_t scales8 = vld1_u8(x[i].scales);
- const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
- const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
- uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
- scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
- const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
- const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
- scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
- scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
- scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
- scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
- int32x4_t sumi = vdupq_n_s32(0);
- for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
- q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
- q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
- q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
- q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
- q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
- q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
- q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
- q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
- q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
- q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
- q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
- q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
- q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
- const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
- const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
- const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
- const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
- const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
- sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
- q2 += 8;
- }
- sumf += d*vaddvq_s32(sumi);
- }
- *s = 0.125f * sumf;
- #elif defined(__AVX2__)
- const __m128i m4 = _mm_set1_epi8(0xf);
- const __m128i m1 = _mm_set1_epi8(1);
- const __m256i m511 = _mm256_set1_epi16(511);
- const __m256i mone = _mm256_set1_epi8(1);
- static const uint8_t k_bit_helper[32] = {
- 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
- 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
- };
- static const char block_sign_shuffle_mask_1[32] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- };
- static const char block_sign_shuffle_mask_2[32] = {
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
- 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
- };
- static const uint8_t bit_selector_mask_bytes[32] = {
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
- };
- const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
- const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
- const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
- const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);
- uint64_t aux64;
- // somewhat hacky, but gives a significant boost in performance
- __m256i aux_gindex;
- const uint16_t * gindex = (const uint16_t *)&aux_gindex;
- __m256 accumf = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memcpy(&aux64, x[i].scales, 8);
- __m128i stmp = _mm_set1_epi64x(aux64);
- stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
- const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);
- __m256i sumi1 = _mm256_setzero_si256();
- __m256i sumi2 = _mm256_setzero_si256();
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {
- const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16;
- aux_gindex = _mm256_and_si256(q2_data, m511);
- const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
- const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
- const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);
- const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
- const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
- iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
- const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
- iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
- const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
- iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
- const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
- iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);
- const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
- const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
- const __m256i full_signs_1 = _mm256_set_m128i(full_signs_l, full_signs_l);
- const __m256i full_signs_2 = _mm256_set_m128i(full_signs_h, full_signs_h);
- __m256i signs;
- signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
- signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
- const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
- signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
- signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
- const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
- signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
- signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
- const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));
- signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
- signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
- const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));
- const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
- const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
- const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
- const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);
- const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
- const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
- const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
- const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));
- sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
- sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
- sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
- sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
- }
- accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
- }
- *s = 0.125f * hsum_float_8(accumf);
- #else
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * restrict q2 = x[i].qs;
- const uint8_t * restrict sc = x[i].scales;
- const int8_t * restrict q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
- const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls2;
- q2 += 4;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
- #endif
- }
- // TODO
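- // IQ3_XXS dot product. The grid indices are plain bytes in x[i].qs (one uint32 grid
- // entry, i.e. 4 quants, per byte); the sign/scale words start QK_K/4 bytes further in,
- // with the same 4 x 7-bit sign index + 4-bit scale packing per 32 weights as IQ2_XXS.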
- void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_iq3_xxs * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #if defined(__ARM_NEON)
- const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
- uint32_t aux32[2];
- ggml_int8x16x4_t q3s;
- ggml_int8x16x4_t q8b;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict gas = x[i].qs + QK_K/4;
- const int8_t * restrict q8 = y[i].qs;
- float sumf1 = 0, sumf2 = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
- memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
- const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]);
- const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]);
- const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]);
- const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]);
- q3 += 16;
- q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127))));
- q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
- q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
- q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
- q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
- q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
- q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
- q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
- const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
- const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
- sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
- sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
- }
- sumf += d*(sumf1 + sumf2);
- }
- *s = 0.5f * sumf;
- #elif defined(__AVX2__)
- const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
- uint32_t aux32[2];
- __m256 accumf = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict gas = x[i].qs + QK_K/4;
- const int8_t * restrict q8 = y[i].qs;
- __m256i sumi1 = _mm256_setzero_si256();
- __m256i sumi2 = _mm256_setzero_si256();
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
- const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
- iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
- q3 += 8;
- const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
- iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
- q3 += 8;
- memcpy(aux32, gas, 8); gas += 8;
- const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127],
- signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]);
- const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
- signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
- const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
- const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
- const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
- const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
- const uint16_t ls1 = aux32[0] >> 28;
- const uint16_t ls2 = aux32[1] >> 28;
- const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
- const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
- sumi1 = _mm256_add_epi32(sumi1, p1);
- sumi2 = _mm256_add_epi32(sumi2, p2);
- }
- accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
- }
- *s = 0.25f * hsum_float_8(accumf);
- #else
- uint32_t aux32;
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict gas = x[i].qs + QK_K/4;
- const int8_t * restrict q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
- const uint32_t ls = 2*(aux32 >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- q3 += 8;
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.25f * sumf;
- #endif
- }
- #ifdef __AVX2__
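- // _mm256_maddubs_epi16 multiplies unsigned by signed bytes, so to get a signed x
- // signed dot product we take |x| and transfer x's sign onto y: |x| * sign(x)*y == x*y.
- // (sign(x) == 0 zeroes the lane, which is harmless since x contributes 0 there anyway.)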
- static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) {
- const __m256i ax = _mm256_sign_epi8(x, x);
- const __m256i sy = _mm256_sign_epi8(y, x);
- return _mm256_maddubs_epi16(ax, sy);
- }
- #endif
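- // IQ1_S dot product. Each 8-bit entry of x[i].qs is extended to a 9-bit index into
- // the 512-entry iq1s_grid by one bit taken from the scale nibbles (bit 3 of each
- // nibble); the remaining 3 bits of the nibble are the sub-block scale, applied as 2*s + 1.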
- void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- UNUSED(nrc);
- UNUSED(bx);
- UNUSED(by);
- UNUSED(bs);
- const block_iq1_s * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #if defined __ARM_NEON
- const uint8x16_t m8 = vdupq_n_u8(0x08);
- const uint8x16_t m7 = vdupq_n_u8(0x07);
- const uint8x16_t m1 = vdupq_n_u8(0x01);
- const int32x4_t vzero = vdupq_n_s32(0);
- uint16_t gindex[8];
- uint16x8x2_t vindex;
- int8x16x4_t q1b;
- int8x16x4_t q8b;
- uint16x8x4_t scales;
- int32x4x2_t sumi;
- int32x4x2_t dotq;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * sc = x[i].scales;
- sumi.val[0] = sumi.val[1] = vzero;
- for (int i128 = 0; i128 < QK_K/128; ++i128) {
- const uint8x16_t ql = vld1q_u8(qs); qs += 16;
- const uint8x8_t tm1 = vld1_u8 (sc); sc += 8;
- const uint8x8_t tm2 = vshr_n_u8(tm1, 4);
- const uint8x16_t qh = vcombine_u8(vzip1_u8(tm1, tm2), vzip2_u8(tm1, tm2));
- const uint8x16_t hbit = vandq_u8(qh, m8);
- vindex.val[0] = vorrq_u16(vmovl_u8(vget_low_u8 (ql)), vshlq_n_u16(vmovl_u8(vget_low_u8 (hbit)), 5));
- vindex.val[1] = vorrq_u16(vmovl_u8(vget_high_u8(ql)), vshlq_n_u16(vmovl_u8(vget_high_u8(hbit)), 5));
- const uint8x16_t scales8 = vorrq_u8(vshlq_n_u8(vandq_u8(qh, m7), 1), m1);
- scales.val[0] = vmovl_u8(vget_low_u8 (scales8));
- scales.val[1] = vmovl_u8(vget_high_u8 (scales8));
- for (int l = 0; l < 2; ++l) {
- vst1q_u16(gindex+0, vindex.val[l]);
- q1b.val[0] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[0])), vld1_s8((const void *)(iq1s_grid+gindex[1])));
- q1b.val[1] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[2])), vld1_s8((const void *)(iq1s_grid+gindex[3])));
- q1b.val[2] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[4])), vld1_s8((const void *)(iq1s_grid+gindex[5])));
- q1b.val[3] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[6])), vld1_s8((const void *)(iq1s_grid+gindex[7])));
- q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
- dotq.val[0] = vpaddq_s32(ggml_vdotq_s32(vzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(vzero, q1b.val[1], q8b.val[1]));
- dotq.val[1] = vpaddq_s32(ggml_vdotq_s32(vzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(vzero, q1b.val[3], q8b.val[3]));
- sumi.val[0] = vmlaq_s32(sumi.val[0], dotq.val[0], vreinterpretq_s32_u32(vmovl_u16(vget_low_u16 (scales.val[l]))));
- sumi.val[1] = vmlaq_s32(sumi.val[1], dotq.val[1], vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales.val[l]))));
- }
- }
- sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * vaddvq_s32(vaddq_s32(sumi.val[0], sumi.val[1]));
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m128i m8 = _mm_set1_epi8(0x08);
- const __m128i m7 = _mm_set1_epi8(0x07);
- const __m128i m1 = _mm_set1_epi8(0x01);
- const __m128i shuffle_h = _mm_set_epi8(15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0);
- const __m128i shuffle_s[4] = {
- _mm_set_epi32(0x03030303, 0x02020202, 0x01010101, 0x00000000),
- _mm_set_epi32(0x07070707, 0x06060606, 0x05050505, 0x04040404),
- _mm_set_epi32(0x0b0b0b0b, 0x0a0a0a0a, 0x09090909, 0x08080808),
- _mm_set_epi32(0x0f0f0f0f, 0x0e0e0e0e, 0x0d0d0d0d, 0x0c0c0c0c)
- };
- uint64_t aux64;
- __m256i v_gindex;
- const uint16_t * gindex = (const uint16_t *)&v_gindex;
- __m256 accum = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * sc = x[i].scales;
- __m256i sumi = _mm256_setzero_si256();
- for (int i128 = 0; i128 < QK_K/128; ++i128) {
- const __m128i ql = _mm_loadu_si128((const __m128i*)qs); qs += 16;
- memcpy(&aux64, sc, 8); sc += 8;
- const __m128i qh = _mm_shuffle_epi8(_mm_set_epi64x(aux64 >> 4, aux64), shuffle_h);
- const __m256i hbit = _mm256_cvtepu8_epi16(_mm_and_si128(qh, m8));
- v_gindex = _mm256_or_si256(_mm256_cvtepu8_epi16(ql), _mm256_slli_epi16(hbit, 5));
- const __m128i scales = _mm_or_si128(_mm_slli_epi16(_mm_and_si128(qh, m7), 1), m1);
- for (int i32 = 0; i32 < 4; ++i32) {
- const __m256i q8b = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q1b = _mm256_set_epi64x(iq1s_grid[gindex[4*i32+3]], iq1s_grid[gindex[4*i32+2]],
- iq1s_grid[gindex[4*i32+1]], iq1s_grid[gindex[4*i32+0]]);
- const __m256i dot = mul_add_epi8(q1b, q8b);
- const __m256i s16 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, shuffle_s[i32]));
- const __m256i p = _mm256_madd_epi16(s16, dot);
- sumi = _mm256_add_epi32(sumi, p);
- }
- }
- accum = _mm256_fmadd_ps(_mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)), _mm256_cvtepi32_ps(sumi), accum);
- }
- *s = hsum_float_8(accum);
- #else
- int db[4];
- uint16_t idx[4];
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * sc = x[i].scales;
- int sumi = 0;
- for (int i32 = 0; i32 < QK_K/32; ++i32) {
- idx[0] = qs[0] | ((sc[0] & 0x08) << 5);
- idx[1] = qs[1] | ((sc[0] & 0x80) << 1);
- idx[2] = qs[2] | ((sc[1] & 0x08) << 5);
- idx[3] = qs[3] | ((sc[1] & 0x80) << 1);
- db[0] = (2*(sc[0] & 7) + 1);
- db[1] = (2*((sc[0] >> 4) & 7) + 1);
- db[2] = (2*(sc[1] & 7) + 1);
- db[3] = (2*((sc[1] >> 4) & 7) + 1);
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
- int suml = 0;
- for (int j = 0; j < 8; ++j) suml += q8[j] * grid[j];
- sumi += db[l] * suml;
- q8 += 8;
- }
- qs += 4;
- sc += 2;
- }
- sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * sumi;
- }
- *s = sumf;
- #endif
- }
- // ================================ IQ2 quantization =============================================
- typedef struct {
- uint64_t * grid;
- int * map;
- uint16_t * neighbours;
- } iq2_entry_t;
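- // Lazily initialized lookup tables shared by the IQ2/IQ1 quantizers: `grid` packs the
- // codebook points as 8 int8 values per uint64, `map` takes a 16-bit 2-bit-packed point
- // to its grid index (or to a negative offset into `neighbours` when it is off-grid),
- // and `neighbours` holds length-prefixed lists of nearest grid points.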
- static iq2_entry_t iq2_data[3] = {
- {NULL, NULL, NULL},
- {NULL, NULL, NULL},
- {NULL, NULL, NULL},
- };
- static inline int iq2_data_index(enum ggml_type type) {
- GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S);
- return type == GGML_TYPE_IQ2_XXS ? 0 :
- type == GGML_TYPE_IQ2_XS ? 1 : 2;
- }
- static inline int iq2_grid_size(enum ggml_type type) {
- GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S);
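- // note: IQ2_XS and IQ1_S both use a 512-entry grid, so the last two cases coincide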
- return type == GGML_TYPE_IQ2_XXS ? 256 :
- type == GGML_TYPE_IQ2_XS ? 512 : 512;
- }
- static int iq2_compare_func(const void * left, const void * right) {
- const int * l = (const int *)left;
- const int * r = (const int *)right;
- return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
- }
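- // qsort comparator for (squared distance, grid index) pairs: sort by distance first,
- // breaking ties by index so the neighbour lists come out deterministic.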
- void iq2xs_init_impl(enum ggml_type type) {
- const int gindex = iq2_data_index(type);
- const int grid_size = iq2_grid_size(type);
- if (iq2_data[gindex].grid) {
- return;
- }
- static const uint16_t kgrid_2bit_256[256] = {
- 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97,
- 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642,
- 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288,
- 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113,
- 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240,
- 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400,
- 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260,
- 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872,
- 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
- 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
- 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
- 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
- 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
- 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
- 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
- 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
- };
- static const uint16_t kgrid_2bit_512[512] = {
- 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
- 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257,
- 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340,
- 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597,
- 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096,
- 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348,
- 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065,
- 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441,
- 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160,
- 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372,
- 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125,
- 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652,
- 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197,
- 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549,
- 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894,
- 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
- 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
- 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
- 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
- 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
- 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
- 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
- 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
- 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
- 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
- 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
- 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
- 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
- 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
- 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
- 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
- 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
- };
- static const uint16_t kgrid_1bit_512[512] = {
- 10, 33, 41, 85, 132, 134, 160, 162, 277, 337, 340, 345, 357, 405, 516, 545,
- 553, 598, 641, 650, 681, 1042, 1044, 1097, 1169, 1176, 1320, 1345, 1365, 1378, 1434, 1444,
- 1545, 1617, 1642, 1685, 2053, 2080, 2089, 2133, 2176, 2182, 2208, 2214, 2306, 2384, 2393, 2440,
- 2453, 2581, 2664, 2690, 2721, 4117, 4161, 4182, 4184, 4261, 4357, 4369, 4372, 4377, 4390, 4422,
- 4432, 4437, 4449, 4457, 4485, 4497, 4505, 4629, 4677, 4696, 4774, 5205, 5217, 5225, 5386, 5397,
- 5409, 5445, 5457, 5460, 5461, 5462, 5465, 5472, 5477, 5525, 5545, 5650, 5668, 5717, 5729, 5769,
- 5777, 6212, 6234, 6244, 6293, 6424, 6482, 6485, 6502, 6505, 6529, 6538, 6565, 6656, 6682, 6788,
- 6806, 6820, 8218, 8224, 8226, 8232, 8277, 8326, 8354, 8469, 8521, 8530, 8549, 8596, 8737, 8794,
- 9221, 9253, 9348, 9369, 9380, 9474, 9557, 9633, 9732, 9753, 9793, 9830, 9862, 9880, 10240, 10272,
- 10282, 10321, 10406, 10517, 10530, 10566, 10585, 10645, 10896, 16466, 16468, 16473, 16485, 16646, 16660, 16665,
- 16725, 16793, 16806, 16914, 16969, 16977, 16996, 17028, 17057, 17408, 17416, 17434, 17493, 17512, 17578, 17685,
- 17696, 17733, 17745, 17748, 17749, 17750, 17753, 17765, 17794, 17813, 17946, 17984, 18005, 18072, 18453, 18529,
- 18569, 18722, 18756, 18762, 18773, 18794, 18833, 18853, 18945, 19026, 19033, 19077, 20489, 20497, 20500, 20517,
- 20565, 20586, 20610, 20633, 20757, 20769, 20776, 20805, 20817, 20820, 20821, 20822, 20825, 20837, 20864, 20872,
- 20885, 20896, 21002, 21029, 21077, 21146, 21510, 21525, 21573, 21585, 21588, 21589, 21590, 21593, 21605, 21653,
- 21665, 21765, 21777, 21780, 21781, 21782, 21785, 21797, 21825, 21828, 21829, 21830, 21833, 21840, 21841, 21842,
- 21844, 21846, 21848, 21849, 21850, 21857, 21860, 21861, 21862, 21865, 21893, 21905, 21908, 21909, 21910, 21913,
- 21925, 22024, 22037, 22085, 22097, 22100, 22101, 22102, 22105, 22117, 22165, 22545, 22566, 22568, 22594, 22608,
- 22613, 22676, 22697, 22793, 22805, 22853, 22865, 22868, 22869, 22870, 22873, 22885, 22933, 22946, 23046, 23072,
- 23125, 23209, 24597, 24640, 24665, 24673, 24725, 24833, 24840, 24869, 24917, 24934, 24965, 25001, 25108, 25110,
- 25152, 25184, 25192, 25234, 25616, 25618, 25625, 25685, 25704, 25738, 25744, 25770, 25877, 25897, 25925, 25937,
- 25940, 25941, 25942, 25945, 25957, 25986, 26005, 26186, 26197, 26276, 26632, 26634, 26725, 26757, 26770, 26885,
- 26965, 26976, 26986, 27032, 27153, 27174, 27200, 27208, 27240, 27269, 27282, 27290, 32778, 32800, 32802, 32808,
- 32810, 32853, 32904, 32922, 32930, 32932, 33105, 33110, 33112, 33125, 33157, 33280, 33288, 33301, 33312, 33320,
- 33424, 33797, 33829, 33858, 34068, 34133, 34146, 34176, 34217, 34306, 34342, 34441, 34454, 34468, 34832, 34918,
- 34965, 34984, 35094, 35137, 35161, 35208, 35232, 35332, 35338, 35368, 35429, 36932, 36934, 36953, 37009, 37125,
- 37136, 37138, 37145, 37157, 37205, 37220, 37258, 37290, 37444, 37446, 37465, 37478, 37525, 37905, 37968, 37973,
- 38040, 38054, 38145, 38154, 38165, 38180, 38186, 38213, 38225, 38228, 38229, 38230, 38233, 38245, 38293, 38485,
- 38504, 38530, 38938, 38985, 38993, 39012, 39040, 39173, 39192, 39253, 39265, 39301, 39316, 39322, 39442, 39497,
- 39504, 39590, 40970, 40984, 40992, 41002, 41045, 41120, 41128, 41237, 41289, 41297, 41317, 41364, 41366, 41514,
- 41557, 41633, 41989, 42021, 42056, 42068, 42074, 42113, 42242, 42265, 42274, 42325, 42340, 42402, 42501, 42512,
- 42533, 42624, 42632, 42666, 43040, 43093, 43106, 43168, 43176, 43264, 43286, 43345, 43429, 43590, 43618, 43680,
- };
- const int kmap_size = 43692;
- const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2;
- const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 :
- type == GGML_TYPE_IQ2_XS ? kgrid_2bit_512 : kgrid_1bit_512;
- uint64_t * kgrid_q2xs;
- int * kmap_q2xs;
- uint16_t * kneighbors_q2xs;
- printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
- uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
- for (int k = 0; k < grid_size; ++k) {
- int8_t * pos = (int8_t *)(the_grid + k);
- for (int i = 0; i < 8; ++i) {
- int l = (kgrid[k] >> 2*i) & 0x3;
- pos[i] = 2*l + 1;
- }
- }
- kgrid_q2xs = the_grid;
- iq2_data[gindex].grid = the_grid;
- kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
- iq2_data[gindex].map = kmap_q2xs;
- for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
- uint64_t aux64;
- uint8_t * aux8 = (uint8_t *)&aux64;
- for (int i = 0; i < grid_size; ++i) {
- aux64 = kgrid_q2xs[i];
- uint16_t index = 0;
- for (int k=0; k<8; ++k) {
- uint16_t q = (aux8[k] - 1)/2;
- index |= (q << 2*k);
- }
- kmap_q2xs[index] = i;
- }
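- // Every 16-bit packed point that is not an exact codebook entry gets a neighbour
- // list: the first pass below only counts how many grid points fall inside the
- // `nwant` closest distance shells, the second pass fills the length-prefixed lists
- // and stores -(offset+1) in the map so a lookup can recover the list position.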
- int8_t pos[8];
- int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
- int num_neighbors = 0, num_not_in_map = 0;
- for (int i = 0; i < kmap_size; ++i) {
- if (kmap_q2xs[i] >= 0) continue;
- ++num_not_in_map;
- for (int k = 0; k < 8; ++k) {
- int l = (i >> 2*k) & 0x3;
- pos[k] = 2*l + 1;
- }
- for (int j = 0; j < grid_size; ++j) {
- const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
- int d2 = 0;
- for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
- dist2[2*j+0] = d2;
- dist2[2*j+1] = j;
- }
- qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
- int n = 0; int d2 = dist2[0];
- int nhave = 1;
- for (int j = 0; j < grid_size; ++j) {
- if (dist2[2*j] > d2) {
- if (nhave == nwant) break;
- d2 = dist2[2*j];
- ++nhave;
- }
- ++n;
- }
- num_neighbors += n;
- }
- printf("%s: %d neighbours in total\n", __func__, num_neighbors);
- kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
- iq2_data[gindex].neighbours = kneighbors_q2xs;
- int counter = 0;
- for (int i = 0; i < kmap_size; ++i) {
- if (kmap_q2xs[i] >= 0) continue;
- for (int k = 0; k < 8; ++k) {
- int l = (i >> 2*k) & 0x3;
- pos[k] = 2*l + 1;
- }
- for (int j = 0; j < grid_size; ++j) {
- const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
- int d2 = 0;
- for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
- dist2[2*j+0] = d2;
- dist2[2*j+1] = j;
- }
- qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
- kmap_q2xs[i] = -(counter + 1);
- int d2 = dist2[0];
- uint16_t * start = &kneighbors_q2xs[counter++];
- int n = 0, nhave = 1;
- for (int j = 0; j < grid_size; ++j) {
- if (dist2[2*j] > d2) {
- if (nhave == nwant) break;
- d2 = dist2[2*j];
- ++nhave;
- }
- kneighbors_q2xs[counter++] = dist2[2*j+1];
- ++n;
- }
- *start = n;
- }
- free(dist2);
- }
- void iq2xs_free_impl(enum ggml_type type) {
- GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S);
- const int gindex = iq2_data_index(type);
- if (iq2_data[gindex].grid) {
- free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL;
- free(iq2_data[gindex].map); iq2_data[gindex].map = NULL;
- free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
- }
- }
- static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
- const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
- int num_neighbors = neighbours[0];
- GGML_ASSERT(num_neighbors > 0);
- float best_d2 = FLT_MAX;
- int grid_index = -1;
- for (int j = 1; j <= num_neighbors; ++j) {
- const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
- float d2 = 0;
- for (int i = 0; i < 8; ++i) {
- float q = pg[i];
- float diff = scale*q - xval[i];
- d2 += weight[i]*diff*diff;
- }
- if (d2 < best_d2) {
- best_d2 = d2; grid_index = neighbours[j];
- }
- }
- GGML_ASSERT(grid_index >= 0);
- const int8_t * pg = (const int8_t *)(grid + grid_index);
- for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
- return grid_index;
- }
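- // IQ2_XXS quantization (a rough outline of the loop below): work in blocks of 32.
- // Importance weights are quant_weights scaled by sqrt(sigma2 + x^2); signs are pulled
- // out first (with the even-parity fix-up), then a scalar scale is searched over 13
- // candidates around an initial make_qp_quants estimate, snapping each group of 8 |x|
- // values to the nearest codebook point via the neighbour lists; finally block scales
- // are quantized to 4 bits against d = max_scale/31 and everything is packed into q2.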
- static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
- const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS);
- const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
- const int * kmap_q2xs = iq2_data[gindex].map;
- const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
- GGML_ASSERT(quant_weights && "missing quantization weights");
- GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(n%QK_K == 0);
- const int kMaxQ = 3;
- const int nbl = n/256;
- block_iq2_xxs * y = vy;
- float scales[QK_K/32];
- float weight[32];
- float xval[32];
- int8_t L[32];
- int8_t Laux[32];
- float waux[32];
- uint8_t block_signs[4];
- uint32_t q2[2*(QK_K/32)];
- for (int ibl = 0; ibl < nbl; ++ibl) {
- y[ibl].d = GGML_FP32_TO_FP16(0.f);
- memset(q2, 0, QK_K/4);
- float max_scale = 0;
- const float * xbl = x + QK_K*ibl;
- float sumx2 = 0;
- for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
- float sigma2 = sumx2/QK_K;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- const float * xb = xbl + 32*ib;
- const float * qw = quant_weights + QK_K*ibl + 32*ib;
- for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
- for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
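- // Only 7 of the 8 sign bits per group are stored; the 8th is implied by even
- // parity (see keven_signs_q2xs). If a group has an odd number of negatives,
- // flip the sign of the element with the smallest weighted squared value.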
- for (int k = 0; k < 4; ++k) {
- int nflip = 0;
- uint8_t s = 0;
- for (int i = 0; i < 8; ++i) {
- if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
- else {
- xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
- }
- }
- if (nflip%2) {
- int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
- for (int i = 1; i < 8; ++i) {
- float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
- if (ax < min) {
- min = ax; imin = i;
- }
- }
- xval[8*k+imin] = -xval[8*k+imin];
- s ^= (1 << imin);
- }
- block_signs[k] = s & 127;
- }
- float max = xval[0];
- for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
- if (!max) {
- scales[ib] = 0;
- memset(L, 0, 32);
- continue;
- }
- float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight);
- float eff_max = scale*kMaxQ;
- float best = 0;
- for (int is = -6; is <= 6; ++is) {
- float id = (2*kMaxQ-1+is*0.1f)/eff_max;
- float this_scale = 1/id;
- for (int k = 0; k < 4; ++k) {
- for (int i = 0; i < 8; ++i) {
- int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
- Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
- }
- uint16_t u = 0;
- for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
- int grid_index = kmap_q2xs[u];
- if (grid_index < 0) {
- const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
- grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
- }
- }
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 32; ++i) {
- float w = weight[i];
- float q = 2*Laux[i] + 1;
- sumqx += w*xval[i]*q;
- sumq2 += w*q*q;
- }
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- scale = sumqx/sumq2; best = scale*sumqx;
- memcpy(L, Laux, 32);
- }
- }
- if (scale > 0) {
- float id = 1/scale;
- for (int k = 0; k < 4; ++k) {
- uint16_t u = 0;
- for (int i = 0; i < 8; ++i) {
- int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
- l = MAX(0, MIN(kMaxQ-1, l));
- u |= (l << 2*i);
- }
- int grid_index = kmap_q2xs[u];
- if (grid_index < 0) {
- const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
- grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
- }
- const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
- for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
- }
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 32; ++i) {
- float w = weight[i];
- float q = 2*L[i] + 1;
- sumqx += w*xval[i]*q;
- sumq2 += w*q*q;
- }
- if (sumq2 > 0) scale = sumqx/sumq2;
- }
- if (scale < 0) {
- // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
- // and correspondingly flip quant signs.
- scale = -scale;
- for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
- }
- for (int k = 0; k < 4; ++k) {
- uint16_t u = 0;
- for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
- int grid_index = kmap_q2xs[u];
- if (grid_index < 0) {
- printf("Oops: found point %u not on grid:", u);
- for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
- printf("\n");
- GGML_ASSERT(false);
- }
- q2[2*ib+0] |= (grid_index << 8*k);
- q2[2*ib+1] |= (block_signs[k] << 7*k);
- }
- GGML_ASSERT(scale >= 0);
- scales[ib] = scale;
- max_scale = MAX(max_scale, scale);
- }
- if (!max_scale) {
- memset(y[ibl].qs, 0, QK_K/4);
- continue;
- }
- float d = max_scale/31;
- y[ibl].d = GGML_FP32_TO_FP16(d);
- float id = 1/d;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- int l = nearest_int(0.5f*(id*scales[ib]-1));
- l = MAX(0, MIN(15, l));
- q2[2*ib+1] |= ((uint32_t)l << 28);
- }
- memcpy(y[ibl].qs, q2, QK_K/4);
- }
- }
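- // Same scheme as IQ2_XXS, but with a scale per 16 values and a 512-point grid: each
- // 8-value sub-group packs into one uint16 as a 9-bit grid index plus a 7-bit sign mask,
- // and the 4-bit group scales are nibble-packed into the separate y[ibl].scales array.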
- static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
- const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS);
- const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
- const int * kmap_q2xs = iq2_data[gindex].map;
- const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
- GGML_ASSERT(quant_weights && "missing quantization weights");
- GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(n%QK_K == 0);
- const int kMaxQ = 3;
- const int nbl = n/QK_K;
- block_iq2_xs * y = vy;
- float scales[QK_K/16];
- float weight[16];
- float xval[16];
- int8_t L[16];
- int8_t Laux[16];
- float waux[16];
- bool is_on_grid[2];
- bool is_on_grid_aux[2];
- uint8_t block_signs[2];
- uint16_t q2[2*(QK_K/16)];
- for (int ibl = 0; ibl < nbl; ++ibl) {
- y[ibl].d = GGML_FP32_TO_FP16(0.f);
- memset(q2, 0, QK_K/4);
- memset(y[ibl].scales, 0, QK_K/32);
- float max_scale = 0;
- const float * xbl = x + QK_K*ibl;
- float sumx2 = 0;
- for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
- float sigma2 = sumx2/QK_K;
- for (int ib = 0; ib < QK_K/16; ++ib) {
- const float * xb = xbl + 16*ib;
- const float * qw = quant_weights + QK_K*ibl + 16*ib;
- for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
- for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
- for (int k = 0; k < 2; ++k) {
- int nflip = 0;
- uint8_t s = 0;
- for (int i = 0; i < 8; ++i) {
- if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
- else {
- xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
- }
- }
- if (nflip%2) {
- int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
- for (int i = 1; i < 8; ++i) {
- float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
- if (ax < min) {
- min = ax; imin = i;
- }
- }
- xval[8*k+imin] = -xval[8*k+imin];
- s ^= (1 << imin);
- }
- block_signs[k] = s & 127;
- }
- float max = xval[0];
- for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
- if (!max) {
- scales[ib] = 0;
- memset(L, 0, 16);
- continue;
- }
- float best = 0;
- float scale = max/(2*kMaxQ-1);
- is_on_grid[0] = is_on_grid[1] = true;
- for (int is = -9; is <= 9; ++is) {
- float id = (2*kMaxQ-1+is*0.1f)/max;
- float this_scale = 1/id;
- for (int k = 0; k < 2; ++k) {
- for (int i = 0; i < 8; ++i) {
- int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
- Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
- }
- uint16_t u = 0;
- for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
- int grid_index = kmap_q2xs[u];
- is_on_grid_aux[k] = true;
- if (grid_index < 0) {
- is_on_grid_aux[k] = false;
- const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
- grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
- }
- }
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 16; ++i) {
- float w = weight[i];
- float q = 2*Laux[i] + 1;
- sumqx += w*xval[i]*q;
- sumq2 += w*q*q;
- }
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- scale = sumqx/sumq2; best = scale*sumqx;
- for (int i = 0; i < 16; ++i) L[i] = Laux[i];
- for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
- }
- }
- int n_not_ongrid = 0;
- for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
- if (n_not_ongrid > 0 && scale > 0) {
- float id = 1/scale;
- for (int k = 0; k < 2; ++k) {
- if (is_on_grid[k]) continue;
- uint16_t u = 0;
- for (int i = 0; i < 8; ++i) {
- int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
- l = MAX(0, MIN(kMaxQ-1, l));
- u |= (l << 2*i);
- L[8*k + i] = l;
- }
- int grid_index = kmap_q2xs[u];
- if (grid_index < 0) {
- const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
- grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
- }
- }
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 16; ++i) {
- float w = weight[i];
- float q = 2*L[i] + 1;
- sumqx += w*xval[i]*q;
- sumq2 += w*q*q;
- }
- if (sumq2 > 0) scale = sumqx/sumq2;
- }
- if (scale < 0) {
- scale = -scale;
- for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
- }
- for (int k = 0; k < 2; ++k) {
- uint16_t u = 0;
- for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
- int grid_index = kmap_q2xs[u];
- if (grid_index < 0) {
- printf("Oops: found point %u not on grid:", u);
- for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
- printf("\n");
- GGML_ASSERT(false);
- }
- q2[2*ib+k] = grid_index | (block_signs[k] << 9);
- }
- GGML_ASSERT(scale >= 0);
- scales[ib] = scale;
- max_scale = MAX(max_scale, scale);
- }
- if (!max_scale) {
- memset(y[ibl].qs, 0, QK_K/4);
- continue;
- }
- float d = max_scale/31;
- y[ibl].d = GGML_FP32_TO_FP16(d);
- float id = 1/d;
- for (int ib = 0; ib < QK_K/16; ++ib) {
- int l = nearest_int(0.5f*(id*scales[ib]-1));
- l = MAX(0, MIN(15, l));
- if (ib%2 == 0) y[ibl].scales[ib/2] = l;
- else y[ibl].scales[ib/2] |= (l << 4);
- }
- memcpy(y[ibl].qs, q2, QK_K/4);
- }
- }
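- // Row-wise drivers. A minimal usage sketch (assuming ggml_quantize_init(GGML_TYPE_IQ2_XXS)
- // has been called first, and where `imatrix` is a hypothetical buffer of n_per_row
- // importance weights):
- //
- //   const int nblock = n_per_row/QK_K;
- //   void * buf = malloc((size_t)nrow*nblock*sizeof(block_iq2_xxs));
- //   size_t nbytes = quantize_iq2_xxs(src, buf, nrow, n_per_row, NULL, imatrix);
- //   GGML_ASSERT(nbytes == (size_t)nrow*nblock*sizeof(block_iq2_xxs));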
- size_t quantize_iq2_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- GGML_ASSERT(n_per_row%QK_K == 0);
- int nblock = n_per_row/QK_K;
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += nblock*sizeof(block_iq2_xxs);
- }
- return nrow * nblock * sizeof(block_iq2_xxs);
- }
- size_t quantize_iq2_xs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- GGML_ASSERT(n_per_row%QK_K == 0);
- int nblock = n_per_row/QK_K;
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += nblock*sizeof(block_iq2_xs);
- }
- return nrow * nblock * sizeof(block_iq2_xs);
- }
- //
- // ============================================= 3-bit using D4 lattice
- //
- typedef struct {
- uint32_t * grid;
- int * map;
- uint16_t * neighbours;
- } iq3_entry_t;
- static iq3_entry_t iq3_data[1] = {
- {NULL, NULL, NULL},
- };
- static inline int iq3_data_index(int grid_size) {
- (void)grid_size;
- GGML_ASSERT(grid_size == 256);
- return 0;
- }
- static int iq3_compare_func(const void * left, const void * right) {
- const int * l = (const int *)left;
- const int * r = (const int *)right;
- return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
- }
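- // Builds the IQ3 lookup tables: a 256-point 4D grid whose coordinates are the odd values
- // 2l+1 (l = 0..7); a 4096-entry inverse map taking a 12-bit packed index to the grid
- // position, or to -(offset+1) into the neighbour table for points not on the grid; and,
- // per off-grid point, the list of grid points within the nwant nearest distinct distances.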
- void iq3xs_init_impl(int grid_size) {
- const int gindex = iq3_data_index(grid_size);
- if (iq3_data[gindex].grid) {
- return;
- }
- static const uint16_t kgrid_256[256] = {
- 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74,
- 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159,
- 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321,
- 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531,
- 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664,
- 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978,
- 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105,
- 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228,
- 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553,
- 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722,
- 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063,
- 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389,
- 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746,
- 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153,
- 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610,
- 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992,
- };
- const int kmap_size = 4096;
- const int nwant = 2;
- const uint16_t * kgrid = kgrid_256;
- uint32_t * kgrid_q3xs;
- int * kmap_q3xs;
- uint16_t * kneighbors_q3xs;
- printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
- uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
- for (int k = 0; k < grid_size; ++k) {
- int8_t * pos = (int8_t *)(the_grid + k);
- for (int i = 0; i < 4; ++i) {
- int l = (kgrid[k] >> 3*i) & 0x7;
- pos[i] = 2*l + 1;
- }
- }
- kgrid_q3xs = the_grid;
- iq3_data[gindex].grid = the_grid;
- kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
- iq3_data[gindex].map = kmap_q3xs;
- for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
- uint32_t aux32;
- uint8_t * aux8 = (uint8_t *)&aux32;
- for (int i = 0; i < grid_size; ++i) {
- aux32 = kgrid_q3xs[i];
- uint16_t index = 0;
- for (int k=0; k<4; ++k) {
- uint16_t q = (aux8[k] - 1)/2;
- index |= (q << 3*k);
- }
- kmap_q3xs[index] = i;
- }
- int8_t pos[4];
- int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
- int num_neighbors = 0, num_not_in_map = 0;
- for (int i = 0; i < kmap_size; ++i) {
- if (kmap_q3xs[i] >= 0) continue;
- ++num_not_in_map;
- for (int k = 0; k < 4; ++k) {
- int l = (i >> 3*k) & 0x7;
- pos[k] = 2*l + 1;
- }
- for (int j = 0; j < grid_size; ++j) {
- const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
- int d2 = 0;
- for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
- dist2[2*j+0] = d2;
- dist2[2*j+1] = j;
- }
- qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
- int n = 0; int d2 = dist2[0];
- int nhave = 1;
- for (int j = 0; j < grid_size; ++j) {
- if (dist2[2*j] > d2) {
- if (nhave == nwant) break;
- d2 = dist2[2*j];
- ++nhave;
- }
- ++n;
- }
- num_neighbors += n;
- }
- printf("%s: %d neighbours in total\n", __func__, num_neighbors);
- kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
- iq3_data[gindex].neighbours = kneighbors_q3xs;
- int counter = 0;
- for (int i = 0; i < kmap_size; ++i) {
- if (kmap_q3xs[i] >= 0) continue;
- for (int k = 0; k < 4; ++k) {
- int l = (i >> 3*k) & 0x7;
- pos[k] = 2*l + 1;
- }
- for (int j = 0; j < grid_size; ++j) {
- const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
- int d2 = 0;
- for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
- dist2[2*j+0] = d2;
- dist2[2*j+1] = j;
- }
- qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
- kmap_q3xs[i] = -(counter + 1);
- int d2 = dist2[0];
- uint16_t * start = &kneighbors_q3xs[counter++];
- int n = 0, nhave = 1;
- for (int j = 0; j < grid_size; ++j) {
- if (dist2[2*j] > d2) {
- if (nhave == nwant) break;
- d2 = dist2[2*j];
- ++nhave;
- }
- kneighbors_q3xs[counter++] = dist2[2*j+1];
- ++n;
- }
- *start = n;
- }
- free(dist2);
- }
- void iq3xs_free_impl(int grid_size) {
- GGML_ASSERT(grid_size == 256);
- const int gindex = iq3_data_index(grid_size);
- if (iq3_data[gindex].grid) {
- free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL;
- free(iq3_data[gindex].map); iq3_data[gindex].map = NULL;
- free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
- }
- }
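- // 4D counterpart of iq2_find_best_neighbour: returns the neighbour with the smallest
- // weighted squared distance and writes the corresponding 3-bit quants to L.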
- static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
- const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
- int num_neighbors = neighbours[0];
- GGML_ASSERT(num_neighbors > 0);
- float best_d2 = FLT_MAX;
- int grid_index = -1;
- for (int j = 1; j <= num_neighbors; ++j) {
- const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
- float d2 = 0;
- for (int i = 0; i < 4; ++i) {
- float q = pg[i];
- float diff = scale*q - xval[i];
- d2 += weight[i]*diff*diff;
- }
- if (d2 < best_d2) {
- best_d2 = d2; grid_index = neighbours[j];
- }
- }
- GGML_ASSERT(grid_index >= 0);
- const int8_t * pg = (const int8_t *)(grid + grid_index);
- for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
- return grid_index;
- }
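- // Quantizes one row to IQ3_XXS. Layout per 32-value group: eight 8-bit grid indices in q3,
- // followed (at q3 + QK_K/4) by one uint32 packing four 7-bit sign masks and a 4-bit scale
- // in the top bits. Unlike the IQ2 variants, quant_weights may be NULL, in which case
- // weight[i] = x[i]^2 is used.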
- static void quantize_row_iq3_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
- const int gindex = iq3_data_index(256);
- const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
- const int * kmap_q3xs = iq3_data[gindex].map;
- const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
- //GGML_ASSERT(quant_weights && "missing quantization weights");
- GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(n%QK_K == 0);
- const int kMaxQ = 8;
- const int nbl = n/QK_K;
- block_iq3_xxs * y = vy;
- float scales[QK_K/32];
- float weight[32];
- float xval[32];
- int8_t L[32];
- int8_t Laux[32];
- float waux[32];
- bool is_on_grid[8];
- bool is_on_grid_aux[8];
- uint8_t block_signs[8];
- uint8_t q3[3*(QK_K/8)];
- uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);
- for (int ibl = 0; ibl < nbl; ++ibl) {
- y[ibl].d = GGML_FP32_TO_FP16(0.f);
- memset(q3, 0, 3*QK_K/8);
- float max_scale = 0;
- const float * xbl = x + QK_K*ibl;
- float sumx2 = 0;
- for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
- float sigma2 = sumx2/QK_K;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- const float * xb = xbl + 32*ib;
- if (quant_weights) {
- const float * qw = quant_weights + QK_K*ibl + 32*ib;
- for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
- } else {
- for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
- }
- for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
- for (int k = 0; k < 4; ++k) {
- int nflip = 0;
- uint8_t s = 0;
- for (int i = 0; i < 8; ++i) {
- if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
- else {
- xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
- }
- }
- if (nflip%2) {
- int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
- for (int i = 1; i < 8; ++i) {
- float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
- if (ax < min) {
- min = ax; imin = i;
- }
- }
- xval[8*k+imin] = -xval[8*k+imin];
- s ^= (1 << imin);
- }
- block_signs[k] = s & 127;
- }
- float max = xval[0];
- for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
- if (!max) {
- scales[ib] = 0;
- memset(L, 0, 32);
- continue;
- }
- float best = 0;
- float scale = max/(2*kMaxQ-1);
- for (int k = 0; k < 8; ++k) is_on_grid[k] = false; // ensure defined values in case the scale search never updates them
- for (int is = -15; is <= 15; ++is) {
- float id = (2*kMaxQ-1+is*0.2f)/max;
- float this_scale = 1/id;
- for (int k = 0; k < 8; ++k) {
- for (int i = 0; i < 4; ++i) {
- int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
- Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
- }
- uint16_t u = 0;
- for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
- int grid_index = kmap_q3xs[u];
- is_on_grid_aux[k] = true;
- if (grid_index < 0) {
- is_on_grid_aux[k] = false;
- const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
- grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
- }
- }
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 32; ++i) {
- float w = weight[i];
- float q = 2*Laux[i] + 1;
- sumqx += w*xval[i]*q;
- sumq2 += w*q*q;
- }
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- scale = sumqx/sumq2; best = scale*sumqx;
- for (int i = 0; i < 32; ++i) L[i] = Laux[i];
- for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
- }
- }
- int n_not_ongrid = 0;
- for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
- if (n_not_ongrid > 0 && scale > 0) {
- float id = 1/scale;
- for (int k = 0; k < 8; ++k) {
- if (is_on_grid[k]) continue;
- uint16_t u = 0;
- for (int i = 0; i < 4; ++i) {
- int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
- l = MAX(0, MIN(kMaxQ-1, l));
- u |= (l << 3*i);
- }
- int grid_index = kmap_q3xs[u];
- if (grid_index < 0) {
- const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
- grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
- }
- const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
- for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
- }
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 32; ++i) {
- float w = weight[i];
- float q = 2*L[i] + 1;
- sumqx += w*xval[i]*q;
- sumq2 += w*q*q;
- }
- if (sumq2 > 0) scale = sumqx/sumq2;
- }
- if (scale < 0) {
- // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
- // and correspondingly flip quant signs.
- scale = -scale;
- for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
- }
- for (int k = 0; k < 8; ++k) {
- uint16_t u = 0;
- for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
- int grid_index = kmap_q3xs[u];
- if (grid_index < 0) {
- printf("Oops: found point %u not on grid:", u);
- for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
- printf("\n");
- GGML_ASSERT(false);
- }
- q3[8*ib+k] = grid_index;
- }
- scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
- GGML_ASSERT(scale >= 0);
- scales[ib] = scale;
- max_scale = MAX(max_scale, scale);
- }
- if (!max_scale) {
- memset(y[ibl].qs, 0, 3*QK_K/8);
- continue;
- }
- float d = max_scale/31;
- y[ibl].d = GGML_FP32_TO_FP16(d);
- float id = 1/d;
- float sumqx = 0, sumq2 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- int l = nearest_int(0.5f*(id*scales[ib]-1));
- l = MAX(0, MIN(15, l));
- scales_and_signs[ib] |= ((uint32_t)l << 28);
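- // The block below is a disabled (if (false)) refinement pass: it exhaustively re-searches
- // all 256 grid points per sub-group at the final scale and rescales d by the least-squares
- // optimum. It appears to have been kept for experimentation.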
- if (false) {
- const float * xb = xbl + 32*ib;
- if (quant_weights) {
- const float * qw = quant_weights + QK_K*ibl + 32*ib;
- for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
- } else {
- for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
- }
- const float db = 0.25f * d * (1 + 2*l);
- for (int k = 0; k < 8; ++k) {
- const int8_t * signs = keven_signs_q2xs + 8*((scales_and_signs[ib] >> 7*(k/2)) & 127) + 4*(k%2);
- const float * xk = xb + 4*k;
- const float * wk = weight + 4*k;
- //const uint8_t * grid = (const uint8_t *)(kgrid_q3xs + q3[8*ib+k]);
- const uint8_t * grid = (const uint8_t *)(iq3xxs_grid + q3[8*ib+k]);
- float best_mse = 0; int best_index = q3[8*ib+k];
- for (int j = 0; j < 4; ++j) {
- float diff = db * grid[j] * signs[j] - xk[j];
- best_mse += wk[j] * diff * diff;
- }
- for (int idx = 0; idx < 256; ++idx) {
- //grid = (const uint8_t *)(kgrid_q3xs + idx);
- grid = (const uint8_t *)(iq3xxs_grid + idx);
- float mse = 0;
- for (int j = 0; j < 4; ++j) {
- float diff = db * grid[j] * signs[j] - xk[j];
- mse += wk[j] * diff * diff;
- }
- if (mse < best_mse) {
- best_mse = mse; best_index = idx;
- }
- }
- q3[8*ib+k] = best_index;
- //grid = (const uint8_t *)(kgrid_q3xs + best_index);
- grid = (const uint8_t *)(iq3xxs_grid + best_index);
- for (int j = 0; j < 4; ++j) {
- float q = db * grid[j] * signs[j];
- sumqx += wk[j] * q * xk[j];
- sumq2 += wk[j] * q * q;
- }
- }
- if (sumq2 > 0) y[ibl].d = GGML_FP32_TO_FP16(d*sumqx/sumq2);
- }
- }
- memcpy(y[ibl].qs, q3, 3*QK_K/8);
- }
- }
- size_t quantize_iq3_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- GGML_ASSERT(n_per_row%QK_K == 0);
- int nblock = n_per_row/QK_K;
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_iq3_xxs_impl(src, qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += nblock*sizeof(block_iq3_xxs);
- }
- return nrow * nblock * sizeof(block_iq3_xxs);
- }
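- // Convenience wrappers that quantize without importance weights (quant_weights = NULL).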
- void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_iq3_xxs * restrict y = vy;
- quantize_row_iq3_xxs_reference(x, y, k);
- }
- void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int k) {
- assert(k % QK_K == 0);
- quantize_row_iq3_xxs_impl(x, y, k, NULL);
- }
- // =================================== 1.5 bpw ===================================================
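- // For IQ1_S the grid stores values 2l+1, so q = (v-3)/2 gives ternary quants in {-1, 0, 1}.
- // Unlike the distance-based searches above, this maximizes sumqx^2/sumq2 (equivalent to
- // minimizing the weighted SSD at the optimal scale), updates *scale as a side effect, and
- // falls back to a full grid scan when no neighbour yields a positive score.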
- static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
- const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) {
- int num_neighbors = neighbours[0];
- GGML_ASSERT(num_neighbors > 0);
- float best_score = 0;
- int grid_index = -1;
- for (int j = 1; j <= num_neighbors; ++j) {
- const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 8; ++i) {
- float q = (pg[i] - 3)/2;
- float w = weight[i];
- sumqx += w*q*xval[i];
- sumq2 += w*q*q;
- }
- if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
- *scale = sumqx/sumq2; best_score = *scale * sumqx;
- grid_index = neighbours[j];
- }
- }
- if (grid_index < 0) {
- for (int i = 0; i < ngrid; ++i) {
- const int8_t * grid_i = (const int8_t *)(grid + i);
- float sumqx = 0, sumq2 = 0;
- for (int j = 0; j < 8; ++j) {
- float w = weight[j];
- float q = (grid_i[j] - 3)/2;
- sumqx += w*q*xval[j];
- sumq2 += w*q*q;
- }
- if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
- *scale = sumqx/sumq2; best_score = *scale*sumqx;
- grid_index = i;
- }
- }
- }
- if (grid_index < 0) {
- printf("Oops, did not find grid point\n");
- printf("Have %d neighbours\n", num_neighbors);
- for (int j = 1; j <= num_neighbors; ++j) {
- const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
- float sumqx = 0, sumq2 = 0;
- for (int i = 0; i < 8; ++i) {
- float q = (pg[i] - 3)/2;
- float w = weight[i];
- sumqx += w*q*xval[i];
- sumq2 += w*q*q;
- }
- printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
- }
- }
- GGML_ASSERT(grid_index >= 0);
- //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result.
- //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- const int8_t * pg = (const int8_t *)(grid + grid_index);
- for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
- return grid_index;
- }
- static int iq1_sort_helper(const void * left, const void * right) {
- const float * l = left;
- const float * r = right;
- return *l < *r ? -1 : *l > *r ? 1 : 0;
- }
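- // Quantizes one row to IQ1_S: one grid index per 8 values, with its low 8 bits stored in qs
- // and the 9th bit folded into the top bit of the group's scale nibble next to a 3-bit scale.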
- static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
- const int gindex = iq2_data_index(GGML_TYPE_IQ1_S);
- const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
- const int * kmap_q2xs = iq2_data[gindex].map;
- const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
- GGML_ASSERT(quant_weights && "missing quantization weights");
- GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
- GGML_ASSERT(n%QK_K == 0);
- const int nbl = n/QK_K;
- block_iq1_s * y = vy;
- float scales[QK_K/8];
- float weight[8];
- int8_t L[8];
- float sumx[9];
- float sumw[9];
- float pairs[16];
- int * idx = (int *)(pairs + 1);
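- // pairs[] interleaves (value, index) in 2*sizeof(float) strides; idx aliases the odd slots,
- // so sorting the values with qsort drags the original indices along (this assumes
- // sizeof(int) == sizeof(float)).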
- uint8_t hbit[QK_K/8];
- for (int ibl = 0; ibl < nbl; ++ibl) {
- y[ibl].d = GGML_FP32_TO_FP16(0.f);
- memset(y[ibl].qs, 0, QK_K/8);
- memset(y[ibl].scales, 0, QK_K/16);
- float max_scale = 0;
- const float * xbl = x + QK_K*ibl;
- float sumx2 = 0;
- for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
- float sigma2 = sumx2/QK_K;
- for (int ib = 0; ib < QK_K/8; ++ib) {
- const float * xb = xbl + 8*ib;
- const float * qw = quant_weights + QK_K*ibl + 8*ib;
- for (int i = 0; i < 8; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
- float max = fabsf(xb[0]);
- for (int i = 1; i < 8; ++i) max = MAX(max, fabsf(xb[i]));
- if (!max) {
- scales[ib] = 0;
- memset(L, 1, 8);
- continue;
- }
- // Here we solve the weighted sum of squared differences (SSD) minimization problem exactly.
- // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
- // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
- // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and
- // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale
- // and score for each possible split.
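- // Concretely, for a split (i1, i2) the sorted values get quants -1/0/+1, the optimal scale
- // is sumqx/sumq2 with sumqx = (S8 - S_i2) - S_i1 and sumq2 = W_i1 + (W8 - W_i2), and the
- // split maximizing sumqx^2/sumq2 minimizes the weighted SSD.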
- for (int j = 0; j < 8; ++j) {
- pairs[2*j] = xb[j];
- idx[2*j] = j;
- }
- qsort(pairs, 8, 2*sizeof(float), iq1_sort_helper);
- {
- sumx[0] = sumw[0] = 0;
- for (int j = 0; j < 8; ++j) {
- int i = idx[2*j];
- sumx[j+1] = sumx[j] + weight[i]*xb[i];
- sumw[j+1] = sumw[j] + weight[i];
- }
- }
- float best_score = 0, scale = max;
- int besti1 = 0, besti2 = 0;
- for (int i1 = 0; i1 <= 8; ++i1) {
- for (int i2 = i1; i2 <= 8; ++i2) {
- float sumqx = -(sumx[i1] - sumx[0]) + (sumx[8] - sumx[i2]);
- float sumq2 = (sumw[i1] - sumw[0]) + (sumw[8] - sumw[i2]);
- if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
- scale = sumqx/sumq2; best_score = scale*sumqx;
- besti1 = i1; besti2 = i2;
- }
- }
- }
- for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
- for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
- for (int j = besti2; j < 8; ++j) L[idx[2*j]] = 2;
- if (scale < 0) {
- for (int j = 0; j < 8; ++j) L[j] = 2 - L[j];
- scale = -scale;
- }
- // Now we check if the solution found above corresponds to a grid point and, if not, use a neighbouring
- // grid point that minimizes SSD.
- uint16_t u = 0;
- for (int j = 0; j < 8; ++j) u |= (L[j] << 2*j);
- int grid_index = kmap_q2xs[u];
- if (grid_index < 0) {
- const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
- grid_index = iq1_find_best_neighbour(neighbours, kgrid_q2xs, xb, weight, &scale, L, NGRID_IQ2XXS);
- GGML_ASSERT(grid_index >= 0);
- }
- y[ibl].qs[ib] = grid_index & 255;
- hbit[ib] = grid_index >> 8;
- GGML_ASSERT(scale >= 0);
- scales[ib] = scale;
- max_scale = MAX(max_scale, scale);
- }
- if (!max_scale) {
- memset(y[ibl].qs, 0, QK_K/8);
- continue;
- }
- float d = max_scale/15;
- y[ibl].d = GGML_FP32_TO_FP16(d*1.085f); // 1.085f is another fudge factor. Don't ask me why it is needed.
- float id = 1/d;
- for (int ib = 0; ib < QK_K/8; ++ib) {
- int l = nearest_int(0.5f*(id*scales[ib]-1));
- l = MAX(0, MIN(7, l));
- if (hbit[ib]) l |= 8;
- y[ibl].scales[ib/2] |= (l << 4*(ib%2));
- }
- }
- }
- size_t quantize_iq1_s(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
- (void)hist;
- GGML_ASSERT(n_per_row%QK_K == 0);
- int nblock = n_per_row/QK_K;
- char * qrow = (char *)dst;
- for (int row = 0; row < nrow; ++row) {
- quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights);
- src += n_per_row;
- qrow += nblock*sizeof(block_iq1_s);
- }
- return nrow * nblock * sizeof(block_iq1_s);
- }