// ggml-quants.c

#include "ggml-quants.h"
#include "ggml-impl.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define UNUSED GGML_UNUSED

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}
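
// Illustrative scalar sketch of mul_sum_i8_pairs (the name mul_sum_i8_pairs_ref
// is hypothetical and not used by the kernels below). Each 32-bit lane produced
// by the SIMD helper holds the sum of four adjacent signed products. Taking |x|
// and copying the sign of x onto y leaves every product x[i]*y[i] unchanged,
// which is what allows _mm_maddubs_epi16 (unsigned * signed) to be used here.
static inline int32_t mul_sum_i8_pairs_ref(const int8_t * x, const int8_t * y) {
    int32_t acc = 0;
    for (int i = 0; i < 4; ++i) {
        acc += (int32_t) x[i] * (int32_t) y[i];
    }
    return acc;
}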

#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}
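
// Minimal scalar equivalent of the horizontal sums above (hsum_f32_ref is a
// hypothetical name, added only as an illustration). The SIMD helpers compute
// the same plain sum, but by repeatedly folding the upper half of the register
// onto the lower half instead of looping over lanes.
static inline float hsum_f32_ref(const float * v, int n) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += v[i];
    }
    return sum;
}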

#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}
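
// Illustrative scalar sketch of bytes_from_bits_32 (bytes_from_bits_32_ref is
// a hypothetical name): bit i of the 32-bit input selects whether output byte
// i is 0xFF or 0x00. The SIMD version gets there by broadcasting source byte
// i/8 into lane i, OR-ing with ~(1 << (i % 8)), and comparing against all-ones.
static inline void bytes_from_bits_32_ref(const uint8_t * x, uint8_t * out) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    for (int i = 0; i < 32; ++i) {
        out[i] = ((x32 >> i) & 1) ? 0xFF : 0x00;
    }
}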

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}
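
// Illustrative scalar sketch of bytes_from_nibbles_32 (bytes_from_nibbles_32_ref
// is a hypothetical name): the 16 low nibbles of the input land in output bytes
// 0..15 and the 16 high nibbles in output bytes 16..31, matching the
// MM256_SET_M128I(high, low) ordering above.
static inline void bytes_from_nibbles_32_ref(const uint8_t * rsi, uint8_t * out) {
    for (int j = 0; j < 16; ++j) {
        out[j]      = rsi[j] & 0x0F;
        out[j + 16] = rsi[j] >> 4;
    }
}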

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}
  134. static inline __m128i packNibbles( __m256i bytes )
  135. {
  136. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  137. #if __AVX512F__
  138. const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
  139. bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
  140. return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
  141. #else
  142. const __m256i lowByte = _mm256_set1_epi16( 0xFF );
  143. __m256i high = _mm256_andnot_si256( lowByte, bytes );
  144. __m256i low = _mm256_and_si256( lowByte, bytes );
  145. high = _mm256_srli_epi16( high, 4 );
  146. bytes = _mm256_or_si256( low, high );
  147. // Compress uint16_t lanes into bytes
  148. __m128i r0 = _mm256_castsi256_si128( bytes );
  149. __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
  150. return _mm_packus_epi16( r0, r1 );
  151. #endif
  152. }
  153. #elif defined(__AVX__)
  154. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  155. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  156. uint32_t x32;
  157. memcpy(&x32, x, sizeof(uint32_t));
  158. const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  159. const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
  160. __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
  161. __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
  162. const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
  163. bytesl = _mm_or_si128(bytesl, bit_mask);
  164. bytesh = _mm_or_si128(bytesh, bit_mask);
  165. bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
  166. bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
  167. return MM256_SET_M128I(bytesh, bytesl);
  168. }
  169. // Unpack 32 4-bit fields into 32 bytes
  170. // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
  171. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  172. {
  173. // Load 16 bytes from memory
  174. __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
  175. __m128i tmph = _mm_srli_epi16(tmpl, 4);
  176. const __m128i lowMask = _mm_set1_epi8(0xF);
  177. tmpl = _mm_and_si128(lowMask, tmpl);
  178. tmph = _mm_and_si128(lowMask, tmph);
  179. return MM256_SET_M128I(tmph, tmpl);
  180. }
  181. // add int16_t pairwise and return as float vector
  182. static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
  183. const __m128i ones = _mm_set1_epi16(1);
  184. const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
  185. const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
  186. const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
  187. return _mm256_cvtepi32_ps(summed_pairs);
  188. }
  189. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  190. const __m128i axl = _mm256_castsi256_si128(ax);
  191. const __m128i axh = _mm256_extractf128_si256(ax, 1);
  192. const __m128i syl = _mm256_castsi256_si128(sy);
  193. const __m128i syh = _mm256_extractf128_si256(sy, 1);
  194. // Perform multiplication and create 16-bit values
  195. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  196. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  197. return sum_i16_pairs_float(doth, dotl);
  198. }
  199. // multiply int8_t, add results pairwise twice and return as float vector
  200. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  201. const __m128i xl = _mm256_castsi256_si128(x);
  202. const __m128i xh = _mm256_extractf128_si256(x, 1);
  203. const __m128i yl = _mm256_castsi256_si128(y);
  204. const __m128i yh = _mm256_extractf128_si256(y, 1);
  205. // Get absolute values of x vectors
  206. const __m128i axl = _mm_sign_epi8(xl, xl);
  207. const __m128i axh = _mm_sign_epi8(xh, xh);
  208. // Sign the values of the y vectors
  209. const __m128i syl = _mm_sign_epi8(yl, xl);
  210. const __m128i syh = _mm_sign_epi8(yh, xh);
  211. // Perform multiplication and create 16-bit values
  212. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  213. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  214. return sum_i16_pairs_float(doth, dotl);
  215. }
  216. static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
  217. {
  218. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  219. const __m128i lowByte = _mm_set1_epi16( 0xFF );
  220. __m128i high = _mm_andnot_si128( lowByte, bytes1 );
  221. __m128i low = _mm_and_si128( lowByte, bytes1 );
  222. high = _mm_srli_epi16( high, 4 );
  223. bytes1 = _mm_or_si128( low, high );
  224. high = _mm_andnot_si128( lowByte, bytes2 );
  225. low = _mm_and_si128( lowByte, bytes2 );
  226. high = _mm_srli_epi16( high, 4 );
  227. bytes2 = _mm_or_si128( low, high );
  228. return _mm_packus_epi16( bytes1, bytes2);
  229. }
  230. #endif
  231. #elif defined(__SSSE3__)
  232. // horizontally add 4x4 floats
  233. static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
  234. __m128 res_0 =_mm_hadd_ps(a, b);
  235. __m128 res_1 =_mm_hadd_ps(c, d);
  236. __m128 res =_mm_hadd_ps(res_0, res_1);
  237. res =_mm_hadd_ps(res, res);
  238. res =_mm_hadd_ps(res, res);
  239. return _mm_cvtss_f32(res);
  240. }
  241. #endif // __AVX__ || __AVX2__ || __AVX512F__
  242. #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
  243. #if defined(__ARM_NEON)
  244. #ifdef _MSC_VER
  245. #define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }
  246. #else
  247. #define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }
  248. #endif
  249. #if !defined(__aarch64__)
  250. // 64-bit compatibility
  251. // vaddvq_s16
  252. // vpaddq_s16
  253. // vpaddq_s32
  254. // vaddvq_s32
  255. // vaddvq_f32
  256. // vmaxvq_f32
  257. // vcvtnq_s32_f32
  258. // vzip1_u8
  259. // vzip2_u8
  260. inline static int32_t vaddvq_s16(int16x8_t v) {
  261. return
  262. (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
  263. (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
  264. (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
  265. (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
  266. }
  267. inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
  268. int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
  269. int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
  270. return vcombine_s16(a0, b0);
  271. }
  272. inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
  273. int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
  274. int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
  275. return vcombine_s32(a0, b0);
  276. }
  277. inline static int32_t vaddvq_s32(int32x4_t v) {
  278. return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
  279. }
  280. inline static float vaddvq_f32(float32x4_t v) {
  281. return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
  282. }
  283. inline static float vmaxvq_f32(float32x4_t v) {
  284. return
  285. MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
  286. MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
  287. }
  288. inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
  289. int32x4_t res;
  290. res[0] = roundf(vgetq_lane_f32(v, 0));
  291. res[1] = roundf(vgetq_lane_f32(v, 1));
  292. res[2] = roundf(vgetq_lane_f32(v, 2));
  293. res[3] = roundf(vgetq_lane_f32(v, 3));
  294. return res;
  295. }
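// [editor's note: illustrative, not part of the original source]
// This scalar fallback uses roundf, which rounds halfway cases away from
// zero, whereas the native vcvtnq_s32_f32 rounds them to nearest even, so
// quantized values can differ by 1 on exact .5 inputs between the two paths.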
  296. inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
  297. uint8x8_t res;
  298. res[0] = a[0]; res[1] = b[0];
  299. res[2] = a[1]; res[3] = b[1];
  300. res[4] = a[2]; res[5] = b[2];
  301. res[6] = a[3]; res[7] = b[3];
  302. return res;
  303. }
  304. inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
  305. uint8x8_t res;
  306. res[0] = a[4]; res[1] = b[4];
  307. res[2] = a[5]; res[3] = b[5];
  308. res[4] = a[6]; res[5] = b[6];
  309. res[6] = a[7]; res[7] = b[7];
  310. return res;
  311. }
  312. // vld1q_s16_x2
  313. // vld1q_u8_x2
  314. // vld1q_u8_x4
  315. // vld1q_s8_x2
  316. // vld1q_s8_x4
  317. // TODO: double-check these work correctly
  318. typedef struct ggml_int16x8x2_t {
  319. int16x8_t val[2];
  320. } ggml_int16x8x2_t;
  321. inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
  322. ggml_int16x8x2_t res;
  323. res.val[0] = vld1q_s16(ptr + 0);
  324. res.val[1] = vld1q_s16(ptr + 8);
  325. return res;
  326. }
  327. typedef struct ggml_uint8x16x2_t {
  328. uint8x16_t val[2];
  329. } ggml_uint8x16x2_t;
  330. inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
  331. ggml_uint8x16x2_t res;
  332. res.val[0] = vld1q_u8(ptr + 0);
  333. res.val[1] = vld1q_u8(ptr + 16);
  334. return res;
  335. }
  336. typedef struct ggml_uint8x16x4_t {
  337. uint8x16_t val[4];
  338. } ggml_uint8x16x4_t;
  339. inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
  340. ggml_uint8x16x4_t res;
  341. res.val[0] = vld1q_u8(ptr + 0);
  342. res.val[1] = vld1q_u8(ptr + 16);
  343. res.val[2] = vld1q_u8(ptr + 32);
  344. res.val[3] = vld1q_u8(ptr + 48);
  345. return res;
  346. }
  347. typedef struct ggml_int8x16x2_t {
  348. int8x16_t val[2];
  349. } ggml_int8x16x2_t;
  350. inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
  351. ggml_int8x16x2_t res;
  352. res.val[0] = vld1q_s8(ptr + 0);
  353. res.val[1] = vld1q_s8(ptr + 16);
  354. return res;
  355. }
  356. typedef struct ggml_int8x16x4_t {
  357. int8x16_t val[4];
  358. } ggml_int8x16x4_t;
  359. inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
  360. ggml_int8x16x4_t res;
  361. res.val[0] = vld1q_s8(ptr + 0);
  362. res.val[1] = vld1q_s8(ptr + 16);
  363. res.val[2] = vld1q_s8(ptr + 32);
  364. res.val[3] = vld1q_s8(ptr + 48);
  365. return res;
  366. }
  367. // NOTE: not tested
  368. inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
  369. int8x16_t res;
  370. res[ 0] = a[b[ 0]];
  371. res[ 1] = a[b[ 1]];
  372. res[ 2] = a[b[ 2]];
  373. res[ 3] = a[b[ 3]];
  374. res[ 4] = a[b[ 4]];
  375. res[ 5] = a[b[ 5]];
  376. res[ 6] = a[b[ 6]];
  377. res[ 7] = a[b[ 7]];
  378. res[ 8] = a[b[ 8]];
  379. res[ 9] = a[b[ 9]];
  380. res[10] = a[b[10]];
  381. res[11] = a[b[11]];
  382. res[12] = a[b[12]];
  383. res[13] = a[b[13]];
  384. res[14] = a[b[14]];
  385. res[15] = a[b[15]];
  386. return res;
  387. }
  388. // NOTE: not tested
389. inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
390. uint8x16_t res;
  391. res[ 0] = a[b[ 0]];
  392. res[ 1] = a[b[ 1]];
  393. res[ 2] = a[b[ 2]];
  394. res[ 3] = a[b[ 3]];
  395. res[ 4] = a[b[ 4]];
  396. res[ 5] = a[b[ 5]];
  397. res[ 6] = a[b[ 6]];
  398. res[ 7] = a[b[ 7]];
  399. res[ 8] = a[b[ 8]];
  400. res[ 9] = a[b[ 9]];
  401. res[10] = a[b[10]];
  402. res[11] = a[b[11]];
  403. res[12] = a[b[12]];
  404. res[13] = a[b[13]];
  405. res[14] = a[b[14]];
  406. res[15] = a[b[15]];
  407. return res;
  408. }
  409. #else
  410. #define ggml_int16x8x2_t int16x8x2_t
  411. #define ggml_uint8x16x2_t uint8x16x2_t
  412. #define ggml_uint8x16x4_t uint8x16x4_t
  413. #define ggml_int8x16x2_t int8x16x2_t
  414. #define ggml_int8x16x4_t int8x16x4_t
  415. #define ggml_vld1q_s16_x2 vld1q_s16_x2
  416. #define ggml_vld1q_u8_x2 vld1q_u8_x2
  417. #define ggml_vld1q_u8_x4 vld1q_u8_x4
  418. #define ggml_vld1q_s8_x2 vld1q_s8_x2
  419. #define ggml_vld1q_s8_x4 vld1q_s8_x4
  420. #define ggml_vqtbl1q_s8 vqtbl1q_s8
  421. #define ggml_vqtbl1q_u8 vqtbl1q_u8
  422. #endif
  423. #if !defined(__ARM_FEATURE_DOTPROD)
  424. inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
  425. const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
  426. const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));
  427. return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
  428. }
  429. #else
  430. #define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)
  431. #endif
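// [editor's note: illustrative, not part of the original source]
// The non-dotprod fallback above widens each half with vmull_s8 and
// pairwise-adds the 16-bit products with vpaddlq_s16 before accumulating. Its
// per-lane grouping differs from a real vdotq_s32 (pairs from both halves land
// in the same lane instead of groups of 4 adjacent bytes), but the sum across
// all four lanes is identical, which is all the dot-product kernels rely on.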
  432. #endif
  433. #if defined(__ARM_NEON) || defined(__wasm_simd128__)
  434. #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
  435. #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
  436. #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
  437. #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
  438. #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
  439. #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
  440. #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
  441. #define B8(c,s ) B7(c,s, c), B7(c,s, s)
442. // precomputed tables for expanding 8 bits to 8 bytes:
  443. static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
  444. static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
  445. #endif
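// [editor's note: illustrative, not part of the original source]
// B8(00, 10) expands, for every 8-bit index b, to a 64-bit constant whose k-th
// byte (LSB first) is 0x10 when bit k of b is set and 0x00 otherwise;
// table_b2b_1 is the complement. For example table_b2b_0[0x05] ==
// 0x0000000000100010, since bits 0 and 2 are set.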
  446. // reference implementation for deterministic creation of model files
  447. void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
  448. static const int qk = QK4_0;
  449. assert(k % qk == 0);
  450. const int nb = k / qk;
  451. for (int i = 0; i < nb; i++) {
  452. float amax = 0.0f; // absolute max
  453. float max = 0.0f;
  454. for (int j = 0; j < qk; j++) {
  455. const float v = x[i*qk + j];
  456. if (amax < fabsf(v)) {
  457. amax = fabsf(v);
  458. max = v;
  459. }
  460. }
  461. const float d = max / -8;
  462. const float id = d ? 1.0f/d : 0.0f;
  463. y[i].d = GGML_FP32_TO_FP16(d);
  464. for (int j = 0; j < qk/2; ++j) {
  465. const float x0 = x[i*qk + 0 + j]*id;
  466. const float x1 = x[i*qk + qk/2 + j]*id;
  467. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
  468. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
  469. y[i].qs[j] = xi0;
  470. y[i].qs[j] |= xi1 << 4;
  471. }
  472. }
  473. }
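// [editor's note: illustrative, not part of the original source]
// q4_0 stores one fp16 scale d per block of 32 values plus 16 bytes of packed
// 4-bit quants. Choosing d = max/-8 maps the value with the largest magnitude
// onto an end of the [0, 15] range after the +8.5 offset. Worked example: if
// the block's extreme value is -2.0 then d = 0.25 and id = 4, so a source
// value of 1.0 quantizes to (int8_t)(1.0*4 + 8.5) = 12 and dequantizes back to
// (12 - 8)*0.25 = 1.0, while -2.0 itself maps to 0 and back to -2.0 exactly.
#if 0
// Illustrative usage sketch (editor's addition, deliberately excluded from the
// build): round-trip one block through the reference quantizer and the
// dequantizer defined further down in this file.
static void example_q4_0_roundtrip(void) {
    float src[QK4_0], dst[QK4_0];
    block_q4_0 blk;
    for (int j = 0; j < QK4_0; ++j) src[j] = 0.1f*j - 1.6f;
    quantize_row_q4_0_reference(src, &blk, QK4_0);
    dequantize_row_q4_0(&blk, dst, QK4_0);
}
#endif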
  474. void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
  475. quantize_row_q4_0_reference(x, y, k);
  476. }
  477. void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
  478. const int qk = QK4_1;
  479. assert(k % qk == 0);
  480. const int nb = k / qk;
  481. for (int i = 0; i < nb; i++) {
  482. float min = FLT_MAX;
  483. float max = -FLT_MAX;
  484. for (int j = 0; j < qk; j++) {
  485. const float v = x[i*qk + j];
  486. if (v < min) min = v;
  487. if (v > max) max = v;
  488. }
  489. const float d = (max - min) / ((1 << 4) - 1);
  490. const float id = d ? 1.0f/d : 0.0f;
  491. y[i].d = GGML_FP32_TO_FP16(d);
  492. y[i].m = GGML_FP32_TO_FP16(min);
  493. for (int j = 0; j < qk/2; ++j) {
  494. const float x0 = (x[i*qk + 0 + j] - min)*id;
  495. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  496. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
  497. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
  498. y[i].qs[j] = xi0;
  499. y[i].qs[j] |= xi1 << 4;
  500. }
  501. }
  502. }
  503. void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
  504. quantize_row_q4_1_reference(x, y, k);
  505. }
  506. void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
  507. static const int qk = QK5_0;
  508. assert(k % qk == 0);
  509. const int nb = k / qk;
  510. for (int i = 0; i < nb; i++) {
  511. float amax = 0.0f; // absolute max
  512. float max = 0.0f;
  513. for (int j = 0; j < qk; j++) {
  514. const float v = x[i*qk + j];
  515. if (amax < fabsf(v)) {
  516. amax = fabsf(v);
  517. max = v;
  518. }
  519. }
  520. const float d = max / -16;
  521. const float id = d ? 1.0f/d : 0.0f;
  522. y[i].d = GGML_FP32_TO_FP16(d);
  523. uint32_t qh = 0;
  524. for (int j = 0; j < qk/2; ++j) {
  525. const float x0 = x[i*qk + 0 + j]*id;
  526. const float x1 = x[i*qk + qk/2 + j]*id;
  527. const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
  528. const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
  529. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
530. // get the 5th bit and store it in qh at the right position
  531. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  532. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  533. }
  534. memcpy(&y[i].qh, &qh, sizeof(qh));
  535. }
  536. }
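// [editor's note: illustrative, not part of the original source]
// q5_0 packs the low 4 bits of each quant two per byte in qs, exactly like
// q4_0, and gathers the 32 fifth bits of the block into the 32-bit qh word:
// bit j of qh belongs to the j-th value of the first half and bit j + 16 to
// the j-th value of the second half.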
  537. void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
  538. quantize_row_q5_0_reference(x, y, k);
  539. }
  540. void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
  541. const int qk = QK5_1;
  542. assert(k % qk == 0);
  543. const int nb = k / qk;
  544. for (int i = 0; i < nb; i++) {
  545. float min = FLT_MAX;
  546. float max = -FLT_MAX;
  547. for (int j = 0; j < qk; j++) {
  548. const float v = x[i*qk + j];
  549. if (v < min) min = v;
  550. if (v > max) max = v;
  551. }
  552. const float d = (max - min) / ((1 << 5) - 1);
  553. const float id = d ? 1.0f/d : 0.0f;
  554. y[i].d = GGML_FP32_TO_FP16(d);
  555. y[i].m = GGML_FP32_TO_FP16(min);
  556. uint32_t qh = 0;
  557. for (int j = 0; j < qk/2; ++j) {
  558. const float x0 = (x[i*qk + 0 + j] - min)*id;
  559. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  560. const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
  561. const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
  562. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
563. // get the 5th bit and store it in qh at the right position
  564. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  565. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  566. }
  567. memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
  568. }
  569. }
  570. void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
  571. quantize_row_q5_1_reference(x, y, k);
  572. }
  573. // reference implementation for deterministic creation of model files
  574. void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
  575. assert(k % QK8_0 == 0);
  576. const int nb = k / QK8_0;
  577. for (int i = 0; i < nb; i++) {
  578. float amax = 0.0f; // absolute max
  579. for (int j = 0; j < QK8_0; j++) {
  580. const float v = x[i*QK8_0 + j];
  581. amax = MAX(amax, fabsf(v));
  582. }
  583. const float d = amax / ((1 << 7) - 1);
  584. const float id = d ? 1.0f/d : 0.0f;
  585. y[i].d = GGML_FP32_TO_FP16(d);
  586. for (int j = 0; j < QK8_0; ++j) {
  587. const float x0 = x[i*QK8_0 + j]*id;
  588. y[i].qs[j] = roundf(x0);
  589. }
  590. }
  591. }
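// [editor's note: illustrative, not part of the original source]
// q8_0 keeps a full signed byte per value, so the block only needs the scale
// d = amax/127. Worked example: with amax = 6.35, d = 0.05, so a source value
// of 1.27 becomes roundf(1.27/0.05) = 25 and dequantizes to 25*0.05 = 1.25.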
  592. void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
  593. assert(QK8_0 == 32);
  594. assert(k % QK8_0 == 0);
  595. const int nb = k / QK8_0;
  596. block_q8_0 * restrict y = vy;
  597. #if defined(__ARM_NEON)
  598. for (int i = 0; i < nb; i++) {
  599. float32x4_t srcv [8];
  600. float32x4_t asrcv[8];
  601. float32x4_t amaxv[8];
  602. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  603. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  604. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  605. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  606. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  607. const float amax = vmaxvq_f32(amaxv[0]);
  608. const float d = amax / ((1 << 7) - 1);
  609. const float id = d ? 1.0f/d : 0.0f;
  610. y[i].d = GGML_FP32_TO_FP16(d);
  611. for (int j = 0; j < 8; j++) {
  612. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  613. const int32x4_t vi = vcvtnq_s32_f32(v);
  614. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  615. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  616. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  617. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  618. }
  619. }
  620. #elif defined(__wasm_simd128__)
  621. for (int i = 0; i < nb; i++) {
  622. v128_t srcv [8];
  623. v128_t asrcv[8];
  624. v128_t amaxv[8];
  625. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  626. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  627. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  628. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  629. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  630. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  631. wasm_f32x4_extract_lane(amaxv[0], 1)),
  632. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  633. wasm_f32x4_extract_lane(amaxv[0], 3)));
  634. const float d = amax / ((1 << 7) - 1);
  635. const float id = d ? 1.0f/d : 0.0f;
  636. y[i].d = GGML_FP32_TO_FP16(d);
  637. for (int j = 0; j < 8; j++) {
  638. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  639. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  640. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  641. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  642. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  643. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  644. }
  645. }
  646. #elif defined(__AVX2__) || defined(__AVX__)
  647. for (int i = 0; i < nb; i++) {
  648. // Load elements into 4 AVX vectors
  649. __m256 v0 = _mm256_loadu_ps( x );
  650. __m256 v1 = _mm256_loadu_ps( x + 8 );
  651. __m256 v2 = _mm256_loadu_ps( x + 16 );
  652. __m256 v3 = _mm256_loadu_ps( x + 24 );
  653. x += 32;
  654. // Compute max(abs(e)) for the block
  655. const __m256 signBit = _mm256_set1_ps( -0.0f );
  656. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  657. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  658. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  659. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  660. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  661. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  662. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  663. const float maxScalar = _mm_cvtss_f32( max4 );
  664. // Quantize these floats
  665. const float d = maxScalar / 127.f;
  666. y[i].d = GGML_FP32_TO_FP16(d);
  667. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  668. const __m256 mul = _mm256_set1_ps( id );
  669. // Apply the multiplier
  670. v0 = _mm256_mul_ps( v0, mul );
  671. v1 = _mm256_mul_ps( v1, mul );
  672. v2 = _mm256_mul_ps( v2, mul );
  673. v3 = _mm256_mul_ps( v3, mul );
  674. // Round to nearest integer
  675. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  676. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  677. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  678. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  679. // Convert floats to integers
  680. __m256i i0 = _mm256_cvtps_epi32( v0 );
  681. __m256i i1 = _mm256_cvtps_epi32( v1 );
  682. __m256i i2 = _mm256_cvtps_epi32( v2 );
  683. __m256i i3 = _mm256_cvtps_epi32( v3 );
  684. #if defined(__AVX2__)
  685. // Convert int32 to int16
  686. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  687. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  688. // Convert int16 to int8
  689. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
690. // The packed signed bytes are now in the wrong order because the AVX2
691. // pack instructions handle each 16-byte half independently
692. // The following permute restores the correct order
  693. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  694. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  695. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  696. #else
697. // AVX lacks some of the integer instructions needed here,
698. // so we split the registers in half and use the SSE equivalents
  699. __m128i ni0 = _mm256_castsi256_si128( i0 );
  700. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  701. __m128i ni2 = _mm256_castsi256_si128( i1 );
  702. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  703. __m128i ni4 = _mm256_castsi256_si128( i2 );
  704. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  705. __m128i ni6 = _mm256_castsi256_si128( i3 );
  706. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  707. // Convert int32 to int16
  708. ni0 = _mm_packs_epi32( ni0, ni1 );
  709. ni2 = _mm_packs_epi32( ni2, ni3 );
  710. ni4 = _mm_packs_epi32( ni4, ni5 );
  711. ni6 = _mm_packs_epi32( ni6, ni7 );
  712. // Convert int16 to int8
  713. ni0 = _mm_packs_epi16( ni0, ni2 );
  714. ni4 = _mm_packs_epi16( ni4, ni6 );
  715. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  716. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  717. #endif
  718. }
  719. #elif defined(__riscv_v_intrinsic)
  720. size_t vl = __riscv_vsetvl_e32m4(QK8_0);
  721. for (int i = 0; i < nb; i++) {
  722. // load elements
  723. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
  724. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  725. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
  726. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  727. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  728. const float d = amax / ((1 << 7) - 1);
  729. const float id = d ? 1.0f/d : 0.0f;
  730. y[i].d = GGML_FP32_TO_FP16(d);
  731. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  732. // convert to integer
  733. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  734. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  735. // store result
  736. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  737. }
  738. #else
  739. GGML_UNUSED(nb);
  740. // scalar
  741. quantize_row_q8_0_reference(x, y, k);
  742. #endif
  743. }
  744. // reference implementation for deterministic creation of model files
  745. void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
  746. assert(QK8_1 == 32);
  747. assert(k % QK8_1 == 0);
  748. const int nb = k / QK8_1;
  749. for (int i = 0; i < nb; i++) {
  750. float amax = 0.0f; // absolute max
  751. for (int j = 0; j < QK8_1; j++) {
  752. const float v = x[i*QK8_1 + j];
  753. amax = MAX(amax, fabsf(v));
  754. }
  755. const float d = amax / ((1 << 7) - 1);
  756. const float id = d ? 1.0f/d : 0.0f;
  757. y[i].d = d;
  758. int sum = 0;
  759. for (int j = 0; j < QK8_1/2; ++j) {
  760. const float v0 = x[i*QK8_1 + j]*id;
  761. const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
  762. y[i].qs[ j] = roundf(v0);
  763. y[i].qs[QK8_1/2 + j] = roundf(v1);
  764. sum += y[i].qs[ j];
  765. sum += y[i].qs[QK8_1/2 + j];
  766. }
  767. y[i].s = sum*d;
  768. }
  769. }
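// [editor's note: illustrative, not part of the original source]
// Compared to q8_0, q8_1 additionally stores s = d * sum(qs), the dequantized
// sum of the block. The dot-product kernels for the *_1 formats use it to
// fold the per-block minimum of the other operand into the result without
// touching the individual quants.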
  770. void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
  771. assert(k % QK8_1 == 0);
  772. const int nb = k / QK8_1;
  773. block_q8_1 * restrict y = vy;
  774. #if defined(__ARM_NEON)
  775. for (int i = 0; i < nb; i++) {
  776. float32x4_t srcv [8];
  777. float32x4_t asrcv[8];
  778. float32x4_t amaxv[8];
  779. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  780. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  781. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  782. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  783. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  784. const float amax = vmaxvq_f32(amaxv[0]);
  785. const float d = amax / ((1 << 7) - 1);
  786. const float id = d ? 1.0f/d : 0.0f;
  787. y[i].d = d;
  788. int32x4_t accv = vdupq_n_s32(0);
  789. for (int j = 0; j < 8; j++) {
  790. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  791. const int32x4_t vi = vcvtnq_s32_f32(v);
  792. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  793. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  794. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  795. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  796. accv = vaddq_s32(accv, vi);
  797. }
  798. y[i].s = d * vaddvq_s32(accv);
  799. }
  800. #elif defined(__wasm_simd128__)
  801. for (int i = 0; i < nb; i++) {
  802. v128_t srcv [8];
  803. v128_t asrcv[8];
  804. v128_t amaxv[8];
  805. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  806. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  807. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  808. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  809. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  810. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  811. wasm_f32x4_extract_lane(amaxv[0], 1)),
  812. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  813. wasm_f32x4_extract_lane(amaxv[0], 3)));
  814. const float d = amax / ((1 << 7) - 1);
  815. const float id = d ? 1.0f/d : 0.0f;
  816. y[i].d = d;
  817. v128_t accv = wasm_i32x4_splat(0);
  818. for (int j = 0; j < 8; j++) {
  819. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  820. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  821. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  822. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  823. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  824. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  825. accv = wasm_i32x4_add(accv, vi);
  826. }
  827. y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
  828. wasm_i32x4_extract_lane(accv, 1) +
  829. wasm_i32x4_extract_lane(accv, 2) +
  830. wasm_i32x4_extract_lane(accv, 3));
  831. }
  832. #elif defined(__AVX2__) || defined(__AVX__)
  833. for (int i = 0; i < nb; i++) {
  834. // Load elements into 4 AVX vectors
  835. __m256 v0 = _mm256_loadu_ps( x );
  836. __m256 v1 = _mm256_loadu_ps( x + 8 );
  837. __m256 v2 = _mm256_loadu_ps( x + 16 );
  838. __m256 v3 = _mm256_loadu_ps( x + 24 );
  839. x += 32;
  840. // Compute max(abs(e)) for the block
  841. const __m256 signBit = _mm256_set1_ps( -0.0f );
  842. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  843. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  844. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  845. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  846. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  847. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  848. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  849. const float maxScalar = _mm_cvtss_f32( max4 );
  850. // Quantize these floats
  851. const float d = maxScalar / 127.f;
  852. y[i].d = d;
  853. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  854. const __m256 mul = _mm256_set1_ps( id );
  855. // Apply the multiplier
  856. v0 = _mm256_mul_ps( v0, mul );
  857. v1 = _mm256_mul_ps( v1, mul );
  858. v2 = _mm256_mul_ps( v2, mul );
  859. v3 = _mm256_mul_ps( v3, mul );
  860. // Round to nearest integer
  861. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  862. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  863. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  864. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  865. // Convert floats to integers
  866. __m256i i0 = _mm256_cvtps_epi32( v0 );
  867. __m256i i1 = _mm256_cvtps_epi32( v1 );
  868. __m256i i2 = _mm256_cvtps_epi32( v2 );
  869. __m256i i3 = _mm256_cvtps_epi32( v3 );
  870. #if defined(__AVX2__)
  871. // Compute the sum of the quants and set y[i].s
  872. y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
  873. // Convert int32 to int16
  874. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  875. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  876. // Convert int16 to int8
  877. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
878. // The packed signed bytes are now in the wrong order because the AVX2
879. // pack instructions handle each 16-byte half independently
880. // The following permute restores the correct order
  881. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  882. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  883. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  884. #else
885. // AVX lacks some of the integer instructions needed here,
886. // so we split the registers in half and use the SSE equivalents
  887. __m128i ni0 = _mm256_castsi256_si128( i0 );
  888. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  889. __m128i ni2 = _mm256_castsi256_si128( i1 );
  890. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  891. __m128i ni4 = _mm256_castsi256_si128( i2 );
  892. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  893. __m128i ni6 = _mm256_castsi256_si128( i3 );
  894. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  895. // Compute the sum of the quants and set y[i].s
  896. const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
  897. const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
  898. y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
  899. // Convert int32 to int16
  900. ni0 = _mm_packs_epi32( ni0, ni1 );
  901. ni2 = _mm_packs_epi32( ni2, ni3 );
  902. ni4 = _mm_packs_epi32( ni4, ni5 );
  903. ni6 = _mm_packs_epi32( ni6, ni7 );
  904. // Convert int16 to int8
  905. ni0 = _mm_packs_epi16( ni0, ni2 );
  906. ni4 = _mm_packs_epi16( ni4, ni6 );
  907. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  908. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  909. #endif
  910. }
  911. #elif defined(__riscv_v_intrinsic)
  912. size_t vl = __riscv_vsetvl_e32m4(QK8_1);
  913. for (int i = 0; i < nb; i++) {
  914. // load elements
  915. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
  916. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  917. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl);
  918. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  919. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  920. const float d = amax / ((1 << 7) - 1);
  921. const float id = d ? 1.0f/d : 0.0f;
  922. y[i].d = d;
  923. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  924. // convert to integer
  925. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  926. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  927. // store result
  928. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  929. // compute sum for y[i].s
  930. vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
  931. vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
  932. // set y[i].s
  933. int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
  934. y[i].s = sum*d;
  935. }
  936. #else
  937. GGML_UNUSED(nb);
  938. // scalar
  939. quantize_row_q8_1_reference(x, y, k);
  940. #endif
  941. }
  942. void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
  943. static const int qk = QK4_0;
  944. assert(k % qk == 0);
  945. const int nb = k / qk;
  946. for (int i = 0; i < nb; i++) {
  947. const float d = GGML_FP16_TO_FP32(x[i].d);
  948. for (int j = 0; j < qk/2; ++j) {
  949. const int x0 = (x[i].qs[j] & 0x0F) - 8;
  950. const int x1 = (x[i].qs[j] >> 4) - 8;
  951. y[i*qk + j + 0 ] = x0*d;
  952. y[i*qk + j + qk/2] = x1*d;
  953. }
  954. }
  955. }
  956. void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
  957. static const int qk = QK4_1;
  958. assert(k % qk == 0);
  959. const int nb = k / qk;
  960. for (int i = 0; i < nb; i++) {
  961. const float d = GGML_FP16_TO_FP32(x[i].d);
  962. const float m = GGML_FP16_TO_FP32(x[i].m);
  963. for (int j = 0; j < qk/2; ++j) {
  964. const int x0 = (x[i].qs[j] & 0x0F);
  965. const int x1 = (x[i].qs[j] >> 4);
  966. y[i*qk + j + 0 ] = x0*d + m;
  967. y[i*qk + j + qk/2] = x1*d + m;
  968. }
  969. }
  970. }
  971. void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
  972. static const int qk = QK5_0;
  973. assert(k % qk == 0);
  974. const int nb = k / qk;
  975. for (int i = 0; i < nb; i++) {
  976. const float d = GGML_FP16_TO_FP32(x[i].d);
  977. uint32_t qh;
  978. memcpy(&qh, x[i].qh, sizeof(qh));
  979. for (int j = 0; j < qk/2; ++j) {
  980. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  981. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  982. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  983. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  984. y[i*qk + j + 0 ] = x0*d;
  985. y[i*qk + j + qk/2] = x1*d;
  986. }
  987. }
  988. }
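// [editor's note: illustrative, not part of the original source]
// The fifth bit of the j-th second-half value lives at bit j + 16 of qh;
// shifting right by j + 12 parks that bit directly at position 4, so the
// & 0x10 both isolates it and leaves it where the 5-bit value needs it,
// mirroring the ((qh >> (j + 0)) << 4) & 0x10 used for the first half.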
  989. void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
  990. static const int qk = QK5_1;
  991. assert(k % qk == 0);
  992. const int nb = k / qk;
  993. for (int i = 0; i < nb; i++) {
  994. const float d = GGML_FP16_TO_FP32(x[i].d);
  995. const float m = GGML_FP16_TO_FP32(x[i].m);
  996. uint32_t qh;
  997. memcpy(&qh, x[i].qh, sizeof(qh));
  998. for (int j = 0; j < qk/2; ++j) {
  999. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  1000. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  1001. const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
  1002. const int x1 = (x[i].qs[j] >> 4) | xh_1;
  1003. y[i*qk + j + 0 ] = x0*d + m;
  1004. y[i*qk + j + qk/2] = x1*d + m;
  1005. }
  1006. }
  1007. }
  1008. void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
  1009. static const int qk = QK8_0;
  1010. assert(k % qk == 0);
  1011. const int nb = k / qk;
  1012. for (int i = 0; i < nb; i++) {
  1013. const float d = GGML_FP16_TO_FP32(x[i].d);
  1014. for (int j = 0; j < qk; ++j) {
  1015. y[i*qk + j] = x[i].qs[j]*d;
  1016. }
  1017. }
  1018. }
  1019. //
  1020. // 2-6 bit quantization in super-blocks
  1021. //
  1022. //
  1023. // ===================== Helper functions
  1024. //
  1025. static inline int nearest_int(float fval) {
  1026. assert(fval <= 4194303.f);
  1027. float val = fval + 12582912.f;
  1028. int i; memcpy(&i, &val, sizeof(int));
  1029. return (i & 0x007fffff) - 0x00400000;
  1030. }
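// [editor's note: illustrative, not part of the original source]
// nearest_int relies on adding 12582912.0f = 1.5 * 2^23, which pushes the
// float into a binade where the unit in the last place is exactly 1, so the
// addition itself performs the rounding and the rounded integer lands in the
// low mantissa bits. Masking with 0x007fffff and subtracting 0x00400000 (the
// bias contributed by the 1.5 factor) recovers the result; rounding follows
// the current FP mode, e.g. nearest_int(2.3f) == 2, nearest_int(-2.7f) == -3,
// and nearest_int(2.5f) == 2 (ties to even by default).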
  1031. static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
  1032. const float * restrict qw) {
  1033. float max = 0;
  1034. float amax = 0;
  1035. for (int i = 0; i < n; ++i) {
  1036. float ax = fabsf(x[i]);
  1037. if (ax > amax) { amax = ax; max = x[i]; }
  1038. }
  1039. if (amax < 1e-30f) { // all zero
  1040. for (int i = 0; i < n; ++i) {
  1041. L[i] = 0;
  1042. }
  1043. return 0.f;
  1044. }
  1045. float iscale = -nmax / max;
  1046. if (rmse_type == 0) {
  1047. for (int i = 0; i < n; ++i) {
  1048. int l = nearest_int(iscale * x[i]);
  1049. L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
  1050. }
  1051. return 1/iscale;
  1052. }
  1053. bool return_early = false;
  1054. if (rmse_type < 0) {
  1055. rmse_type = -rmse_type;
  1056. return_early = true;
  1057. }
  1058. float sumlx = 0;
  1059. float suml2 = 0;
  1060. #ifdef HAVE_BUGGY_APPLE_LINKER
  1061. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1062. for (volatile int i = 0; i < n; ++i) {
  1063. #else
  1064. for (int i = 0; i < n; ++i) {
  1065. #endif
  1066. int l = nearest_int(iscale * x[i]);
  1067. l = MAX(-nmax, MIN(nmax-1, l));
  1068. L[i] = l + nmax;
  1069. float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
  1070. sumlx += w*x[i]*l;
  1071. suml2 += w*l*l;
  1072. }
  1073. float scale = sumlx/suml2;
  1074. if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
  1075. float best = scale * sumlx;
  1076. for (int is = -9; is <= 9; ++is) {
  1077. if (is == 0) {
  1078. continue;
  1079. }
  1080. iscale = -(nmax + 0.1f*is) / max;
  1081. sumlx = suml2 = 0;
  1082. for (int i = 0; i < n; ++i) {
  1083. int l = nearest_int(iscale * x[i]);
  1084. l = MAX(-nmax, MIN(nmax-1, l));
  1085. float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
  1086. sumlx += w*x[i]*l;
  1087. suml2 += w*l*l;
  1088. }
  1089. if (suml2 > 0 && sumlx*sumlx > best*suml2) {
  1090. for (int i = 0; i < n; ++i) {
  1091. int l = nearest_int(iscale * x[i]);
  1092. L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
  1093. }
  1094. scale = sumlx/suml2; best = scale*sumlx;
  1095. }
  1096. }
  1097. return scale;
  1098. }
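// [editor's note: illustrative, not part of the original source]
// make_qx_quants starts from the naive scale -max/nmax, measures the weighted
// least-squares fit scale = sum(w*x*l) / sum(w*l*l), then retries 18 slightly
// perturbed scales (is = -9..9) and keeps whichever maximizes
// sum(w*x*l)^2 / sum(w*l*l), i.e. minimizes the weighted error of the rounded
// quants for the chosen weighting (qw, x^2, 1, |x| or sqrt(|x|)).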
  1099. static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
  1100. float max = 0;
  1101. float amax = 0;
  1102. for (int i = 0; i < n; ++i) {
  1103. float ax = fabsf(x[i]);
  1104. if (ax > amax) { amax = ax; max = x[i]; }
  1105. }
  1106. if (!amax) { // all zero
  1107. for (int i = 0; i < n; ++i) { L[i] = 0; }
  1108. return 0.f;
  1109. }
  1110. float iscale = -nmax / max;
  1111. if (do_rmse) {
  1112. float sumlx = 0;
  1113. float suml2 = 0;
  1114. for (int i = 0; i < n; ++i) {
  1115. int l = nearest_int(iscale * x[i]);
  1116. l = MAX(-nmax, MIN(nmax-1, l));
  1117. L[i] = l;
  1118. float w = x[i]*x[i];
  1119. sumlx += w*x[i]*l;
  1120. suml2 += w*l*l;
  1121. }
  1122. for (int itry = 0; itry < 5; ++itry) {
  1123. int n_changed = 0;
  1124. for (int i = 0; i < n; ++i) {
  1125. float w = x[i]*x[i];
  1126. float slx = sumlx - w*x[i]*L[i];
  1127. if (slx > 0) {
  1128. float sl2 = suml2 - w*L[i]*L[i];
  1129. int new_l = nearest_int(x[i] * sl2 / slx);
  1130. new_l = MAX(-nmax, MIN(nmax-1, new_l));
  1131. if (new_l != L[i]) {
  1132. slx += w*x[i]*new_l;
  1133. sl2 += w*new_l*new_l;
  1134. if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
  1135. L[i] = new_l; sumlx = slx; suml2 = sl2;
  1136. ++n_changed;
  1137. }
  1138. }
  1139. }
  1140. }
  1141. if (!n_changed) {
  1142. break;
  1143. }
  1144. }
  1145. for (int i = 0; i < n; ++i) {
  1146. L[i] += nmax;
  1147. }
  1148. return sumlx / suml2;
  1149. }
  1150. for (int i = 0; i < n; ++i) {
  1151. int l = nearest_int(iscale * x[i]);
  1152. l = MAX(-nmax, MIN(nmax-1, l));
  1153. L[i] = l + nmax;
  1154. }
  1155. return 1/iscale;
  1156. }
  1157. static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
  1158. int ntry, float alpha) {
  1159. float min = x[0];
  1160. float max = x[0];
  1161. for (int i = 1; i < n; ++i) {
  1162. if (x[i] < min) min = x[i];
  1163. if (x[i] > max) max = x[i];
  1164. }
  1165. if (max == min) {
  1166. for (int i = 0; i < n; ++i) L[i] = 0;
  1167. *the_min = 0;
  1168. return 0.f;
  1169. }
  1170. if (min > 0) min = 0;
  1171. float iscale = nmax/(max - min);
  1172. float scale = 1/iscale;
  1173. for (int itry = 0; itry < ntry; ++itry) {
  1174. float sumlx = 0; int suml2 = 0;
  1175. bool did_change = false;
  1176. for (int i = 0; i < n; ++i) {
  1177. int l = nearest_int(iscale*(x[i] - min));
  1178. l = MAX(0, MIN(nmax, l));
  1179. if (l != L[i]) {
  1180. L[i] = l;
  1181. did_change = true;
  1182. }
  1183. sumlx += (x[i] - min)*l;
  1184. suml2 += l*l;
  1185. }
  1186. scale = sumlx/suml2;
  1187. float sum = 0;
  1188. for (int i = 0; i < n; ++i) {
  1189. sum += x[i] - scale*L[i];
  1190. }
  1191. min = alpha*min + (1 - alpha)*sum/n;
  1192. if (min > 0) min = 0;
  1193. iscale = 1/scale;
  1194. if (!did_change) break;
  1195. }
  1196. *the_min = -min;
  1197. return scale;
  1198. }
  1199. static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
  1200. uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
  1201. float rmin, float rdelta, int nstep, bool use_mad) {
  1202. float min = x[0];
  1203. float max = x[0];
  1204. float sum_w = weights[0];
  1205. float sum_x = sum_w * x[0];
  1206. #ifdef HAVE_BUGGY_APPLE_LINKER
  1207. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1208. for (volatile int i = 1; i < n; ++i) {
  1209. #else
  1210. for (int i = 1; i < n; ++i) {
  1211. #endif
  1212. if (x[i] < min) min = x[i];
  1213. if (x[i] > max) max = x[i];
  1214. float w = weights[i];
  1215. sum_w += w;
  1216. sum_x += w * x[i];
  1217. }
  1218. if (min > 0) min = 0;
  1219. if (max == min) {
  1220. for (int i = 0; i < n; ++i) L[i] = 0;
  1221. *the_min = -min;
  1222. return 0.f;
  1223. }
  1224. float iscale = nmax/(max - min);
  1225. float scale = 1/iscale;
  1226. float best_mad = 0;
  1227. for (int i = 0; i < n; ++i) {
  1228. int l = nearest_int(iscale*(x[i] - min));
  1229. L[i] = MAX(0, MIN(nmax, l));
  1230. float diff = scale * L[i] + min - x[i];
  1231. diff = use_mad ? fabsf(diff) : diff * diff;
  1232. float w = weights[i];
  1233. best_mad += w * diff;
  1234. }
  1235. if (nstep < 1) {
  1236. *the_min = -min;
  1237. return scale;
  1238. }
  1239. for (int is = 0; is <= nstep; ++is) {
  1240. iscale = (rmin + rdelta*is + nmax)/(max - min);
  1241. float sum_l = 0, sum_l2 = 0, sum_xl = 0;
  1242. for (int i = 0; i < n; ++i) {
  1243. int l = nearest_int(iscale*(x[i] - min));
  1244. l = MAX(0, MIN(nmax, l));
  1245. Laux[i] = l;
  1246. float w = weights[i];
  1247. sum_l += w*l;
  1248. sum_l2 += w*l*l;
  1249. sum_xl += w*l*x[i];
  1250. }
  1251. float D = sum_w * sum_l2 - sum_l * sum_l;
  1252. if (D > 0) {
  1253. float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
  1254. float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
  1255. if (this_min > 0) {
  1256. this_min = 0;
  1257. this_scale = sum_xl / sum_l2;
  1258. }
  1259. float mad = 0;
  1260. for (int i = 0; i < n; ++i) {
  1261. float diff = this_scale * Laux[i] + this_min - x[i];
  1262. diff = use_mad ? fabsf(diff) : diff * diff;
  1263. float w = weights[i];
  1264. mad += w * diff;
  1265. }
  1266. if (mad < best_mad) {
  1267. for (int i = 0; i < n; ++i) {
  1268. L[i] = Laux[i];
  1269. }
  1270. best_mad = mad;
  1271. scale = this_scale;
  1272. min = this_min;
  1273. }
  1274. }
  1275. }
  1276. *the_min = -min;
  1277. return scale;
  1278. }
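// [editor's note: illustrative, not part of the original source]
// make_qkx2_quants performs an asymmetric (scale, min) fit: for each candidate
// iscale it rounds the quants, solves the 2x2 weighted least-squares system
// for scale and min in closed form (D = sum_w*sum_l2 - sum_l^2 is its
// determinant), clamps min to be non-positive, and keeps the candidate with
// the smallest weighted error (absolute or squared, depending on use_mad).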
  1279. #if QK_K == 256
  1280. static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
  1281. if (j < 4) {
  1282. *d = q[j] & 63; *m = q[j + 4] & 63;
  1283. } else {
  1284. *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
  1285. *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
  1286. }
  1287. }
  1288. #endif
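// [editor's note: illustrative, not part of the original source]
// Packing decoded above: for QK_K == 256 each super-block carries 8 six-bit
// scales and 8 six-bit mins in 12 bytes. The first four scale/min pairs sit in
// the low 6 bits of q[0..3] and q[4..7]; the last four keep their low 4 bits
// in q[8..11] and their top 2 bits in the spare high bits of q[0..3] (scales)
// and q[4..7] (mins), which is what the j >= 4 branch reassembles.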
1289. //========================= 2-bit (de)-quantization
  1290. void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
  1291. assert(k % QK_K == 0);
  1292. const int nb = k / QK_K;
  1293. uint8_t L[QK_K];
  1294. uint8_t Laux[16];
  1295. float weights[16];
  1296. float mins[QK_K/16];
  1297. float scales[QK_K/16];
  1298. const float q4scale = 15.f;
  1299. for (int i = 0; i < nb; i++) {
1300. float max_scale = 0; // since the min is subtracted, scales are always positive
  1301. float max_min = 0;
  1302. for (int j = 0; j < QK_K/16; ++j) {
  1303. for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
  1304. scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
  1305. float scale = scales[j];
  1306. if (scale > max_scale) {
  1307. max_scale = scale;
  1308. }
  1309. float min = mins[j];
  1310. if (min > max_min) {
  1311. max_min = min;
  1312. }
  1313. }
  1314. if (max_scale > 0) {
  1315. float iscale = q4scale/max_scale;
  1316. for (int j = 0; j < QK_K/16; ++j) {
  1317. int l = nearest_int(iscale*scales[j]);
  1318. y[i].scales[j] = l;
  1319. }
  1320. y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
  1321. } else {
  1322. for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
  1323. y[i].d = GGML_FP32_TO_FP16(0.f);
  1324. }
  1325. if (max_min > 0) {
  1326. float iscale = q4scale/max_min;
  1327. for (int j = 0; j < QK_K/16; ++j) {
  1328. int l = nearest_int(iscale*mins[j]);
  1329. y[i].scales[j] |= (l << 4);
  1330. }
  1331. y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
  1332. } else {
  1333. y[i].dmin = GGML_FP32_TO_FP16(0.f);
  1334. }
  1335. for (int j = 0; j < QK_K/16; ++j) {
  1336. const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
  1337. if (!d) continue;
  1338. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
  1339. for (int ii = 0; ii < 16; ++ii) {
  1340. int l = nearest_int((x[16*j + ii] + dm)/d);
  1341. l = MAX(0, MIN(3, l));
  1342. L[16*j + ii] = l;
  1343. }
  1344. }
  1345. #if QK_K == 256
  1346. for (int j = 0; j < QK_K; j += 128) {
  1347. for (int l = 0; l < 32; ++l) {
  1348. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1349. }
  1350. }
  1351. #else
  1352. for (int l = 0; l < 16; ++l) {
  1353. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1354. }
  1355. #endif
  1356. x += QK_K;
  1357. }
  1358. }
  1359. void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
  1360. assert(k % QK_K == 0);
  1361. const int nb = k / QK_K;
  1362. for (int i = 0; i < nb; i++) {
  1363. const float d = GGML_FP16_TO_FP32(x[i].d);
  1364. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1365. const uint8_t * q = x[i].qs;
  1366. #if QK_K == 256
  1367. int is = 0;
  1368. float dl, ml;
  1369. for (int n = 0; n < QK_K; n += 128) {
  1370. int shift = 0;
  1371. for (int j = 0; j < 4; ++j) {
  1372. uint8_t sc = x[i].scales[is++];
  1373. dl = d * (sc & 0xF); ml = min * (sc >> 4);
  1374. for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
  1375. sc = x[i].scales[is++];
  1376. dl = d * (sc & 0xF); ml = min * (sc >> 4);
  1377. for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
  1378. shift += 2;
  1379. }
  1380. q += 32;
  1381. }
  1382. #else
  1383. float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
  1384. float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
  1385. float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
  1386. float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
  1387. for (int l = 0; l < 16; ++l) {
  1388. y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
  1389. y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
  1390. y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
  1391. y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
  1392. }
  1393. y += QK_K;
  1394. #endif
  1395. }
  1396. }
  1397. void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
  1398. quantize_row_q2_K_reference(x, vy, k);
  1399. }
  1400. size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  1401. (void)hist; // TODO: collect histograms
  1402. for (int j = 0; j < n; j += k) {
  1403. block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
  1404. quantize_row_q2_K_reference(src + j, y, k);
  1405. }
  1406. return (n/QK_K*sizeof(block_q2_K));
  1407. }
  1408. static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
  1409. uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
  1410. float rmin, float rdelta, int nstep, bool use_mad) {
  1411. float min = x[0];
  1412. float max = x[0];
  1413. float sum_w = weights ? weights[0] : x[0]*x[0];
  1414. float sum_x = sum_w * x[0];
  1415. #ifdef HAVE_BUGGY_APPLE_LINKER
  1416. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1417. for (volatile int i = 1; i < n; ++i) {
  1418. #else
  1419. for (int i = 1; i < n; ++i) {
  1420. #endif
  1421. if (x[i] < min) min = x[i];
  1422. if (x[i] > max) max = x[i];
  1423. float w = weights ? weights[i] : x[i]*x[i];
  1424. sum_w += w;
  1425. sum_x += w * x[i];
  1426. }
  1427. if (min > 0) {
  1428. min = 0;
  1429. }
  1430. if (max <= min) {
  1431. memset(L, 0, n);
  1432. *the_min = -min;
  1433. return 0.f;
  1434. }
  1435. float iscale = nmax/(max - min);
  1436. float scale = 1/iscale;
  1437. float best_mad = 0;
  1438. for (int i = 0; i < n; ++i) {
  1439. int l = nearest_int(iscale*(x[i] - min));
  1440. L[i] = MAX(0, MIN(nmax, l));
  1441. float diff = scale * L[i] + min - x[i];
  1442. diff = use_mad ? fabsf(diff) : diff*diff;
  1443. float w = weights ? weights[i] : x[i]*x[i];
  1444. best_mad += w * diff;
  1445. }
  1446. if (nstep < 1) {
  1447. *the_min = -min;
  1448. return scale;
  1449. }
  1450. for (int is = 0; is <= nstep; ++is) {
  1451. iscale = (rmin + rdelta*is + nmax)/(max - min);
  1452. float sum_l = 0, sum_l2 = 0, sum_xl = 0;
  1453. for (int i = 0; i < n; ++i) {
  1454. int l = nearest_int(iscale*(x[i] - min));
  1455. l = MAX(0, MIN(nmax, l));
  1456. Laux[i] = l;
  1457. float w = weights ? weights[i] : x[i]*x[i];
  1458. sum_l += w*l;
  1459. sum_l2 += w*l*l;
  1460. sum_xl += w*l*x[i];
  1461. }
  1462. float D = sum_w * sum_l2 - sum_l * sum_l;
  1463. if (D > 0) {
  1464. float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
  1465. float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
  1466. if (this_min > 0) {
  1467. this_min = 0;
  1468. this_scale = sum_xl / sum_l2;
  1469. }
  1470. float mad = 0;
  1471. for (int i = 0; i < n; ++i) {
  1472. float diff = this_scale * Laux[i] + this_min - x[i];
  1473. diff = use_mad ? fabsf(diff) : diff*diff;
  1474. float w = weights ? weights[i] : x[i]*x[i];
  1475. mad += w * diff;
  1476. }
  1477. if (mad < best_mad) {
  1478. for (int i = 0; i < n; ++i) {
  1479. L[i] = Laux[i];
  1480. }
  1481. best_mad = mad;
  1482. scale = this_scale;
  1483. min = this_min;
  1484. }
  1485. }
  1486. }
  1487. *the_min = -min;
  1488. return scale;
  1489. }
  1490. static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
  1491. float max = 0;
  1492. for (int i = 0; i < n; ++i) {
  1493. max = MAX(max, x[i]);
  1494. }
  1495. if (!max) { // all zero
  1496. for (int i = 0; i < n; ++i) { L[i] = 0; }
  1497. return 0.f;
  1498. }
  1499. float iscale = nmax / max;
  1500. for (int i = 0; i < n; ++i) {
  1501. L[i] = nearest_int(iscale * x[i]);
  1502. }
  1503. float scale = 1/iscale;
  1504. float best_mse = 0;
  1505. for (int i = 0; i < n; ++i) {
  1506. float diff = x[i] - scale*L[i];
  1507. float w = quant_weights[i];
  1508. best_mse += w*diff*diff;
  1509. }
  1510. for (int is = -4; is <= 4; ++is) {
  1511. if (is == 0) continue;
  1512. float iscale_is = (0.1f*is + nmax)/max;
  1513. float scale_is = 1/iscale_is;
  1514. float mse = 0;
  1515. for (int i = 0; i < n; ++i) {
  1516. int l = nearest_int(iscale_is*x[i]);
  1517. l = MIN(nmax, l);
  1518. float diff = x[i] - scale_is*l;
  1519. float w = quant_weights[i];
  1520. mse += w*diff*diff;
  1521. }
  1522. if (mse < best_mse) {
  1523. best_mse = mse;
  1524. iscale = iscale_is;
  1525. }
  1526. }
  1527. float sumlx = 0;
  1528. float suml2 = 0;
  1529. for (int i = 0; i < n; ++i) {
  1530. int l = nearest_int(iscale * x[i]);
  1531. l = MIN(nmax, l);
  1532. L[i] = l;
  1533. float w = quant_weights[i];
  1534. sumlx += w*x[i]*l;
  1535. suml2 += w*l*l;
  1536. }
  1537. for (int itry = 0; itry < 5; ++itry) {
  1538. int n_changed = 0;
  1539. for (int i = 0; i < n; ++i) {
  1540. float w = quant_weights[i];
  1541. float slx = sumlx - w*x[i]*L[i];
  1542. float sl2 = suml2 - w*L[i]*L[i];
  1543. if (slx > 0 && sl2 > 0) {
  1544. int new_l = nearest_int(x[i] * sl2 / slx);
  1545. new_l = MIN(nmax, new_l);
  1546. if (new_l != L[i]) {
  1547. slx += w*x[i]*new_l;
  1548. sl2 += w*new_l*new_l;
  1549. if (slx*slx*suml2 > sumlx*sumlx*sl2) {
  1550. L[i] = new_l; sumlx = slx; suml2 = sl2;
  1551. ++n_changed;
  1552. }
  1553. }
  1554. }
  1555. }
  1556. if (!n_changed) {
  1557. break;
  1558. }
  1559. }
  1560. return sumlx / suml2;
  1561. }
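// q2_K row quantization with importance weights. Each super-block of QK_K values is split
// into sub-blocks of 16; every value gets the weight qw[j]*sqrt(sigma2 + x[j]^2), where
// sigma2 is the mean square of the super-block. make_qkx3_quants produces a 2-bit quant and
// a per-sub-block (scale, min); for QK_K == 256 those scales and mins are themselves
// quantized to 4 bits with make_qp_quants (weighted by the per-sub-block weight sums sw),
// giving the super-block d and dmin stored as fp16.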
  1562. static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
  1563. GGML_ASSERT(quant_weights);
  1564. assert(k % QK_K == 0);
  1565. const int nb = k / QK_K;
  1566. const bool requantize = true;
  1567. uint8_t L[QK_K];
  1568. uint8_t Laux[16];
  1569. float mins[QK_K/16];
  1570. float scales[QK_K/16];
  1571. float sw[QK_K/16];
  1572. float weight[16];
  1573. uint8_t Ls[QK_K/16], Lm[QK_K/16];
  1574. for (int i = 0; i < nb; i++) {
  1575. memset(sw, 0, QK_K/16*sizeof(float));
  1576. float sumx2 = 0;
  1577. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1578. float sigma2 = sumx2/QK_K;
  1579. for (int j = 0; j < QK_K/16; ++j) {
  1580. const float * restrict qw = quant_weights + QK_K * i + 16*j;
  1581. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
for (int l = 0; l < 16; ++l) sw[j] += weight[l]; // weight[] holds the 16 weights of this sub-block
  1583. scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1584. }
  1585. float dm, mm;
  1586. #if QK_K == 64
  1587. float max_scale = 0, max_min = 0;
  1588. for (int j = 0; j < QK_K/16; ++j) {
  1589. max_scale = MAX(max_scale, scales[j]);
  1590. max_min = MAX(max_min, mins[j]);
  1591. }
  1592. dm = max_scale/15;
  1593. mm = max_min/15;
  1594. if (max_scale) {
  1595. float id = 1/dm;
  1596. for (int j = 0; j < QK_K/16; ++j) {
  1597. int l = nearest_int(id*scales[j]);
  1598. Ls[j] = MAX(0, MIN(15, l));
  1599. }
  1600. } else {
  1601. memset(Ls, 0, QK_K/16);
  1602. }
  1603. if (max_min) {
  1604. float id = 1/mm;
  1605. for (int j = 0; j < QK_K/16; ++j) {
  1606. int l = nearest_int(id*mins[j]);
  1607. Lm[j] = MAX(0, MIN(15, l));
  1608. }
  1609. } else {
  1610. memset(Lm, 0, QK_K/16);
  1611. }
  1612. #else
  1613. dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
  1614. mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw);
  1615. #endif
  1616. y[i].d = GGML_FP32_TO_FP16(dm);
  1617. y[i].dmin = GGML_FP32_TO_FP16(mm);
  1618. dm = GGML_FP16_TO_FP32(y[i].d);
  1619. mm = GGML_FP16_TO_FP32(y[i].dmin);
  1620. for (int j = 0; j < QK_K/16; ++j) {
  1621. y[i].scales[j] = Ls[j] | (Lm[j] << 4);
  1622. }
  1623. if (requantize) {
  1624. for (int j = 0; j < QK_K/16; ++j) {
  1625. const float d = dm * (y[i].scales[j] & 0xF);
  1626. if (!d) continue;
  1627. const float m = mm * (y[i].scales[j] >> 4);
  1628. for (int ii = 0; ii < 16; ++ii) {
  1629. int l = nearest_int((x[16*j + ii] + m)/d);
  1630. l = MAX(0, MIN(3, l));
  1631. L[16*j + ii] = l;
  1632. }
  1633. }
  1634. }
  1635. #if QK_K == 256
  1636. for (int j = 0; j < QK_K; j += 128) {
  1637. for (int l = 0; l < 32; ++l) {
  1638. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1639. }
  1640. }
  1641. #else
  1642. for (int l = 0; l < 16; ++l) {
  1643. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1644. }
  1645. #endif
  1646. x += QK_K;
  1647. }
  1648. }
  1649. size_t quantize_q2_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  1650. (void)hist;
  1651. size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
  1652. if (!quant_weights) {
  1653. quantize_row_q2_K_reference(src, dst, nrow*n_per_row);
  1654. }
  1655. else {
  1656. char * qrow = (char *)dst;
  1657. for (int row = 0; row < nrow; ++row) {
  1658. quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
  1659. src += n_per_row;
  1660. qrow += row_size;
  1661. }
  1662. }
  1663. return nrow * row_size;
  1664. }
  1665. //========================= 3-bit (de)-quantization
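// Layout used below for one q3_K super-block (QK_K == 256):
//   hmask[QK_K/8] - 3rd (high) bit of each quant; bit b of hmask[m] belongs to quant 32*b + m
//   qs[QK_K/4]    - low 2 bits of the quants, 4 values per byte
//   scales[12]    - sixteen 6-bit sub-block scales, packed
//   d             - fp16 super-block scale
// A dequantized value is d * (scale - 32) * (q - 4) with the 3-bit quant q in 0..7.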
  1666. void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
  1667. assert(k % QK_K == 0);
  1668. const int nb = k / QK_K;
  1669. int8_t L[QK_K];
  1670. float scales[QK_K / 16];
  1671. for (int i = 0; i < nb; i++) {
  1672. float max_scale = 0;
  1673. float amax = 0;
  1674. for (int j = 0; j < QK_K/16; ++j) {
  1675. scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
  1676. float scale = fabsf(scales[j]);
  1677. if (scale > amax) {
  1678. amax = scale; max_scale = scales[j];
  1679. }
  1680. }
  1681. #if QK_K == 256
  1682. memset(y[i].scales, 0, 12);
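// Pack sixteen 6-bit scales into 12 bytes: the low 4 bits of scale j go into the low nibble
// of scales[j] (j < 8) or the high nibble of scales[j-8] (j >= 8); the top 2 bits go into
// scales[8 + j%4] at bit position 2*(j/4).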
  1683. if (max_scale) {
  1684. float iscale = -32.f/max_scale;
  1685. for (int j = 0; j < QK_K/16; ++j) {
  1686. int8_t l = nearest_int(iscale*scales[j]);
  1687. l = MAX(-32, MIN(31, l)) + 32;
  1688. if (j < 8) {
  1689. y[i].scales[j] = l & 0xF;
  1690. } else {
  1691. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1692. }
  1693. l >>= 4;
  1694. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1695. }
  1696. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1697. } else {
  1698. y[i].d = GGML_FP32_TO_FP16(0.f);
  1699. }
  1700. int8_t sc;
  1701. for (int j = 0; j < QK_K/16; ++j) {
  1702. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1703. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1704. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1705. if (!d) {
  1706. continue;
  1707. }
  1708. for (int ii = 0; ii < 16; ++ii) {
  1709. int l = nearest_int(x[16*j + ii]/d);
  1710. l = MAX(-4, MIN(3, l));
  1711. L[16*j + ii] = l + 4;
  1712. }
  1713. }
  1714. #else
  1715. if (max_scale) {
  1716. float iscale = -8.f/max_scale;
  1717. for (int j = 0; j < QK_K/16; j+=2) {
  1718. int l1 = nearest_int(iscale*scales[j]);
  1719. l1 = 8 + MAX(-8, MIN(7, l1));
  1720. int l2 = nearest_int(iscale*scales[j+1]);
  1721. l2 = 8 + MAX(-8, MIN(7, l2));
  1722. y[i].scales[j/2] = l1 | (l2 << 4);
  1723. }
  1724. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1725. } else {
  1726. for (int j = 0; j < QK_K/16; j+=2) {
  1727. y[i].scales[j/2] = 0;
  1728. }
  1729. y[i].d = GGML_FP32_TO_FP16(0.f);
  1730. }
  1731. for (int j = 0; j < QK_K/16; ++j) {
  1732. int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
  1733. float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
  1734. if (!d) {
  1735. continue;
  1736. }
  1737. for (int ii = 0; ii < 16; ++ii) {
  1738. int l = nearest_int(x[16*j + ii]/d);
  1739. l = MAX(-4, MIN(3, l));
  1740. L[16*j + ii] = l + 4;
  1741. }
  1742. }
  1743. #endif
  1744. memset(y[i].hmask, 0, QK_K/8);
// We put the high bit of the first QK_K/8 quants into bit 0 of hmask, of the next QK_K/8 into bit 1, etc.
  1746. int m = 0;
  1747. uint8_t hm = 1;
  1748. for (int j = 0; j < QK_K; ++j) {
  1749. if (L[j] > 3) {
  1750. y[i].hmask[m] |= hm;
  1751. L[j] -= 4;
  1752. }
  1753. if (++m == QK_K/8) {
  1754. m = 0; hm <<= 1;
  1755. }
  1756. }
  1757. #if QK_K == 256
  1758. for (int j = 0; j < QK_K; j += 128) {
  1759. for (int l = 0; l < 32; ++l) {
  1760. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1761. }
  1762. }
  1763. #else
  1764. for (int l = 0; l < 16; ++l) {
  1765. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1766. }
  1767. #endif
  1768. x += QK_K;
  1769. }
  1770. }
  1771. #if QK_K == 256
  1772. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
  1773. assert(k % QK_K == 0);
  1774. const int nb = k / QK_K;
  1775. const uint32_t kmask1 = 0x03030303;
  1776. const uint32_t kmask2 = 0x0f0f0f0f;
  1777. uint32_t aux[4];
  1778. const int8_t * scales = (const int8_t*)aux;
  1779. for (int i = 0; i < nb; i++) {
  1780. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1781. const uint8_t * restrict q = x[i].qs;
  1782. const uint8_t * restrict hm = x[i].hmask;
  1783. uint8_t m = 1;
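// Reassemble the sixteen 6-bit scales from the 12 packed bytes: aux[0..1] hold the low 4 bits
// (one nibble per scale), aux[2] holds the top 2 bits; the shuffle below expands them into
// 16 bytes, one full 6-bit scale per byte, read through the int8 pointer declared above.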
  1784. memcpy(aux, x[i].scales, 12);
  1785. uint32_t tmp = aux[2];
  1786. aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  1787. aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  1788. aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  1789. aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  1790. int is = 0;
  1791. float dl;
  1792. for (int n = 0; n < QK_K; n += 128) {
  1793. int shift = 0;
  1794. for (int j = 0; j < 4; ++j) {
  1795. dl = d_all * (scales[is++] - 32);
  1796. for (int l = 0; l < 16; ++l) {
  1797. *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
  1798. }
  1799. dl = d_all * (scales[is++] - 32);
  1800. for (int l = 0; l < 16; ++l) {
  1801. *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
  1802. }
  1803. shift += 2;
  1804. m <<= 1;
  1805. }
  1806. q += 32;
  1807. }
  1808. }
  1809. }
  1810. #else
  1811. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
  1812. assert(k % QK_K == 0);
  1813. assert(QK_K == 64);
  1814. const int nb = k / QK_K;
  1815. for (int i = 0; i < nb; i++) {
  1816. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1817. const uint8_t * restrict q = x[i].qs;
  1818. const uint8_t * restrict hm = x[i].hmask;
  1819. const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
  1820. const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
  1821. const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
  1822. const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
  1823. for (int l=0; l<8; ++l) {
  1824. uint8_t h = hm[l];
  1825. y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
  1826. y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
  1827. y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
  1828. y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
  1829. y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
  1830. y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
  1831. y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
  1832. y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
  1833. }
  1834. y += QK_K;
  1835. }
  1836. }
  1837. #endif
  1838. void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
  1839. quantize_row_q3_K_reference(x, vy, k);
  1840. }
  1841. size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  1842. (void)hist; // TODO: collect histograms
  1843. for (int j = 0; j < n; j += k) {
  1844. block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
  1845. quantize_row_q3_K_reference(src + j, y, k);
  1846. }
  1847. return (n/QK_K*sizeof(block_q3_K));
  1848. }
  1849. static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int n_per_row, const float * restrict quant_weights) {
  1850. #if QK_K != 256
  1851. (void)quant_weights;
  1852. quantize_row_q3_K_reference(x, y, n_per_row);
  1853. #else
  1854. assert(n_per_row % QK_K == 0);
  1855. const int nb = n_per_row / QK_K;
  1856. int8_t L[QK_K];
  1857. float scales[QK_K / 16];
  1858. float weight[16];
  1859. float sw[QK_K / 16];
  1860. int8_t Ls[QK_K / 16];
  1861. for (int i = 0; i < nb; i++) {
  1862. float sumx2 = 0;
  1863. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1864. float sigma2 = 2*sumx2/QK_K;
  1865. for (int j = 0; j < QK_K/16; ++j) {
  1866. if (quant_weights) {
const float * qw = quant_weights + QK_K * i + 16*j;
  1868. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
  1869. } else {
  1870. for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
  1871. }
  1872. float sumw = 0;
  1873. for (int l = 0; l < 16; ++l) sumw += weight[l];
  1874. sw[j] = sumw;
  1875. scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
  1876. }
  1877. memset(y[i].scales, 0, 12);
  1878. float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
  1879. for (int j = 0; j < QK_K/16; ++j) {
  1880. int l = Ls[j];
  1881. if (j < 8) {
  1882. y[i].scales[j] = l & 0xF;
  1883. } else {
  1884. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1885. }
  1886. l >>= 4;
  1887. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1888. }
  1889. y[i].d = GGML_FP32_TO_FP16(d_block);
  1890. int8_t sc;
  1891. for (int j = 0; j < QK_K/16; ++j) {
  1892. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1893. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1894. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1895. if (!d) {
  1896. continue;
  1897. }
  1898. for (int ii = 0; ii < 16; ++ii) {
  1899. int l = nearest_int(x[16*j + ii]/d);
  1900. l = MAX(-4, MIN(3, l));
  1901. L[16*j + ii] = l + 4;
  1902. }
  1903. }
  1904. memset(y[i].hmask, 0, QK_K/8);
// We put the high bit of the first QK_K/8 quants into bit 0 of hmask, of the next QK_K/8 into bit 1, etc.
  1906. int m = 0;
  1907. uint8_t hm = 1;
  1908. for (int j = 0; j < QK_K; ++j) {
  1909. if (L[j] > 3) {
  1910. y[i].hmask[m] |= hm;
  1911. L[j] -= 4;
  1912. }
  1913. if (++m == QK_K/8) {
  1914. m = 0; hm <<= 1;
  1915. }
  1916. }
  1917. for (int j = 0; j < QK_K; j += 128) {
  1918. for (int l = 0; l < 32; ++l) {
  1919. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1920. }
  1921. }
  1922. x += QK_K;
  1923. }
  1924. #endif
  1925. }
  1926. size_t quantize_q3_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  1927. (void)hist;
  1928. size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
  1929. if (!quant_weights) {
  1930. quantize_row_q3_K_reference(src, dst, nrow*n_per_row);
  1931. }
  1932. else {
  1933. char * qrow = (char *)dst;
  1934. for (int row = 0; row < nrow; ++row) {
  1935. quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
  1936. src += n_per_row;
  1937. qrow += row_size;
  1938. }
  1939. }
  1940. return nrow * row_size;
  1941. }
  1942. // ====================== 4-bit (de)-quantization
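// Layout used below for one q4_K super-block (QK_K == 256):
//   d, dmin     - fp16 super-block scales for the sub-block scales and mins
//   scales[12]  - eight (scale, min) pairs, 6 bits each, packed
//   qs[QK_K/2]  - 4-bit quants, two per byte
// A dequantized value is d*sc*q - dmin*m for the sub-block's 6-bit sc and m, with q in 0..15.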
  1943. void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
  1944. assert(k % QK_K == 0);
  1945. const int nb = k / QK_K;
  1946. uint8_t L[QK_K];
  1947. uint8_t Laux[32];
  1948. float weights[32];
  1949. float mins[QK_K/32];
  1950. float scales[QK_K/32];
  1951. for (int i = 0; i < nb; i++) {
float max_scale = 0; // since the min is subtracted, scales are always positive
  1953. float max_min = 0;
  1954. for (int j = 0; j < QK_K/32; ++j) {
  1955. //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  1956. float sum_x2 = 0;
  1957. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  1958. float av_x = sqrtf(sum_x2/32);
  1959. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1960. scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
  1961. float scale = scales[j];
  1962. if (scale > max_scale) {
  1963. max_scale = scale;
  1964. }
  1965. float min = mins[j];
  1966. if (min > max_min) {
  1967. max_min = min;
  1968. }
  1969. }
  1970. #if QK_K == 256
  1971. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  1972. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
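// Pack eight 6-bit (scale, min) pairs into 12 bytes (the layout expected by get_scale_min_k4):
// for j < 4 the full values go into scales[j] and scales[j+4]; for j >= 4 the low 4 bits go
// into the two nibbles of scales[j+4] and the top 2 bits into the upper bits of scales[j-4]
// and scales[j].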
  1973. for (int j = 0; j < QK_K/32; ++j) {
  1974. uint8_t ls = nearest_int(inv_scale*scales[j]);
  1975. uint8_t lm = nearest_int(inv_min*mins[j]);
  1976. ls = MIN(63, ls);
  1977. lm = MIN(63, lm);
  1978. if (j < 4) {
  1979. y[i].scales[j] = ls;
  1980. y[i].scales[j+4] = lm;
  1981. } else {
  1982. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1983. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1984. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1985. }
  1986. }
  1987. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  1988. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  1989. uint8_t sc, m;
  1990. for (int j = 0; j < QK_K/32; ++j) {
  1991. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1992. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1993. if (!d) continue;
  1994. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1995. for (int ii = 0; ii < 32; ++ii) {
  1996. int l = nearest_int((x[32*j + ii] + dm)/d);
  1997. l = MAX(0, MIN(15, l));
  1998. L[32*j + ii] = l;
  1999. }
  2000. }
  2001. #else
  2002. const float s_factor = 15.f;
  2003. float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
  2004. float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
  2005. int d1 = nearest_int(inv_scale*scales[0]);
  2006. int m1 = nearest_int(inv_min*mins[0]);
  2007. int d2 = nearest_int(inv_scale*scales[1]);
  2008. int m2 = nearest_int(inv_min*mins[1]);
  2009. y[i].scales[0] = d1 | (m1 << 4);
  2010. y[i].scales[1] = d2 | (m2 << 4);
  2011. y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
  2012. y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
  2013. float sumlx = 0;
  2014. int suml2 = 0;
  2015. for (int j = 0; j < QK_K/32; ++j) {
  2016. const uint8_t sd = y[i].scales[j] & 0xF;
  2017. const uint8_t sm = y[i].scales[j] >> 4;
  2018. const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
  2019. if (!d) continue;
  2020. const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
  2021. for (int ii = 0; ii < 32; ++ii) {
  2022. int l = nearest_int((x[32*j + ii] + m)/d);
  2023. l = MAX(0, MIN(15, l));
  2024. L[32*j + ii] = l;
  2025. sumlx += (x[32*j + ii] + m)*l*sd;
  2026. suml2 += l*l*sd*sd;
  2027. }
  2028. }
  2029. if (suml2) {
  2030. y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
  2031. }
  2032. #endif
  2033. uint8_t * q = y[i].qs;
  2034. for (int j = 0; j < QK_K; j += 64) {
  2035. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  2036. q += 32;
  2037. }
  2038. x += QK_K;
  2039. }
  2040. }
  2041. void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
  2042. assert(k % QK_K == 0);
  2043. const int nb = k / QK_K;
  2044. for (int i = 0; i < nb; i++) {
  2045. const uint8_t * q = x[i].qs;
  2046. #if QK_K == 256
  2047. const float d = GGML_FP16_TO_FP32(x[i].d);
  2048. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  2049. int is = 0;
  2050. uint8_t sc, m;
  2051. for (int j = 0; j < QK_K; j += 64) {
  2052. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  2053. const float d1 = d * sc; const float m1 = min * m;
  2054. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  2055. const float d2 = d * sc; const float m2 = min * m;
  2056. for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
  2057. for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
  2058. q += 32; is += 2;
  2059. }
  2060. #else
  2061. const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
  2062. const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
  2063. const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
  2064. const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
  2065. for (int l = 0; l < 32; ++l) {
  2066. y[l+ 0] = d1 * (q[l] & 0xF) - m1;
  2067. y[l+32] = d2 * (q[l] >> 4) - m2;
  2068. }
  2069. y += QK_K;
  2070. #endif
  2071. }
  2072. }
  2073. void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
  2074. assert(k % QK_K == 0);
  2075. block_q4_K * restrict y = vy;
  2076. quantize_row_q4_K_reference(x, y, k);
  2077. }
  2078. size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  2079. assert(k % QK_K == 0);
  2080. (void)hist; // TODO: collect histograms
  2081. for (int j = 0; j < n; j += k) {
  2082. block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
  2083. quantize_row_q4_K_reference(src + j, y, k);
  2084. }
  2085. return (n/QK_K*sizeof(block_q4_K));
  2086. }
  2087. static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int n_per_row, const float * quant_weights) {
  2088. #if QK_K != 256
  2089. (void)quant_weights;
  2090. quantize_row_q4_K_reference(x, y, n_per_row);
  2091. #else
  2092. assert(n_per_row % QK_K == 0);
  2093. const int nb = n_per_row / QK_K;
  2094. uint8_t L[QK_K];
  2095. uint8_t Laux[32];
  2096. uint8_t Ls[QK_K/32];
  2097. uint8_t Lm[QK_K/32];
  2098. float weights[32];
  2099. float sw[QK_K/32];
  2100. float mins[QK_K/32];
  2101. float scales[QK_K/32];
  2102. for (int i = 0; i < nb; i++) {
  2103. float sum_x2 = 0;
  2104. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2105. float sigma2 = 2*sum_x2/QK_K;
  2106. float av_x = sqrtf(sigma2);
  2107. for (int j = 0; j < QK_K/32; ++j) {
  2108. if (quant_weights) {
  2109. const float * qw = quant_weights + QK_K*i + 32*j;
  2110. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2111. } else {
  2112. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2113. }
  2114. float sumw = 0;
  2115. for (int l = 0; l < 32; ++l) sumw += weights[l];
  2116. sw[j] = sumw;
  2117. scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2118. }
  2119. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  2120. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  2121. for (int j = 0; j < QK_K/32; ++j) {
  2122. uint8_t ls = Ls[j];
  2123. uint8_t lm = Lm[j];
  2124. if (j < 4) {
  2125. y[i].scales[j] = ls;
  2126. y[i].scales[j+4] = lm;
  2127. } else {
  2128. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2129. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2130. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2131. }
  2132. }
  2133. y[i].d = GGML_FP32_TO_FP16(d_block);
  2134. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  2135. uint8_t sc, m;
  2136. for (int j = 0; j < QK_K/32; ++j) {
  2137. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2138. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2139. if (!d) continue;
  2140. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2141. for (int ii = 0; ii < 32; ++ii) {
  2142. int l = nearest_int((x[32*j + ii] + dm)/d);
  2143. l = MAX(0, MIN(15, l));
  2144. L[32*j + ii] = l;
  2145. }
  2146. }
  2147. uint8_t * q = y[i].qs;
  2148. for (int j = 0; j < QK_K; j += 64) {
  2149. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  2150. q += 32;
  2151. }
  2152. x += QK_K;
  2153. }
  2154. #endif
  2155. }
  2156. size_t quantize_q4_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2157. (void)hist;
  2158. size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
  2159. if (!quant_weights) {
  2160. quantize_row_q4_K_reference(src, dst, nrow*n_per_row);
  2161. }
  2162. else {
  2163. char * qrow = (char *)dst;
  2164. for (int row = 0; row < nrow; ++row) {
  2165. quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
  2166. src += n_per_row;
  2167. qrow += row_size;
  2168. }
  2169. }
  2170. return nrow * row_size;
  2171. }
  2172. // ====================== 5-bit (de)-quantization
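// Layout used below for one q5_K super-block (QK_K == 256):
//   d, dmin     - fp16 super-block scales for the sub-block scales and mins
//   scales[12]  - eight (scale, min) pairs, 6 bits each, packed as for q4_K
//   qh[QK_K/8]  - 5th bit of each quant
//   qs[QK_K/2]  - low 4 bits of the quants, two per byte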
  2173. void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
  2174. assert(k % QK_K == 0);
  2175. const int nb = k / QK_K;
  2176. #if QK_K == 256
  2177. uint8_t L[QK_K];
  2178. float mins[QK_K/32];
  2179. float scales[QK_K/32];
  2180. float weights[32];
  2181. uint8_t Laux[32];
  2182. #else
  2183. int8_t L[QK_K];
  2184. float scales[QK_K/16];
  2185. #endif
  2186. for (int i = 0; i < nb; i++) {
  2187. #if QK_K == 256
float max_scale = 0; // since the min is subtracted, scales are always positive
  2189. float max_min = 0;
  2190. for (int j = 0; j < QK_K/32; ++j) {
  2191. //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  2192. float sum_x2 = 0;
  2193. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  2194. float av_x = sqrtf(sum_x2/32);
  2195. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2196. scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
  2197. float scale = scales[j];
  2198. if (scale > max_scale) {
  2199. max_scale = scale;
  2200. }
  2201. float min = mins[j];
  2202. if (min > max_min) {
  2203. max_min = min;
  2204. }
  2205. }
  2206. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  2207. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  2208. for (int j = 0; j < QK_K/32; ++j) {
  2209. uint8_t ls = nearest_int(inv_scale*scales[j]);
  2210. uint8_t lm = nearest_int(inv_min*mins[j]);
  2211. ls = MIN(63, ls);
  2212. lm = MIN(63, lm);
  2213. if (j < 4) {
  2214. y[i].scales[j] = ls;
  2215. y[i].scales[j+4] = lm;
  2216. } else {
  2217. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2218. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2219. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2220. }
  2221. }
  2222. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  2223. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  2224. uint8_t sc, m;
  2225. for (int j = 0; j < QK_K/32; ++j) {
  2226. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2227. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2228. if (!d) continue;
  2229. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2230. for (int ii = 0; ii < 32; ++ii) {
  2231. int l = nearest_int((x[32*j + ii] + dm)/d);
  2232. l = MAX(0, MIN(31, l));
  2233. L[32*j + ii] = l;
  2234. }
  2235. }
  2236. uint8_t * restrict qh = y[i].qh;
  2237. uint8_t * restrict ql = y[i].qs;
  2238. memset(qh, 0, QK_K/8);
  2239. uint8_t m1 = 1, m2 = 2;
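// Store the low 4 bits of each 5-bit quant as nibble pairs in ql and the 5th bits in qh:
// within each 64-value chunk, bit m1 of qh[j] is the high bit of L[n+j] and bit m2 that of
// L[n+j+32]; m1/m2 advance by two bit positions per chunk, so the four chunks of a
// 256-value super-block fill all 8 bits of qh[j].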
  2240. for (int n = 0; n < QK_K; n += 64) {
  2241. for (int j = 0; j < 32; ++j) {
  2242. int l1 = L[n + j];
  2243. if (l1 > 15) {
  2244. l1 -= 16; qh[j] |= m1;
  2245. }
  2246. int l2 = L[n + j + 32];
  2247. if (l2 > 15) {
  2248. l2 -= 16; qh[j] |= m2;
  2249. }
  2250. ql[j] = l1 | (l2 << 4);
  2251. }
  2252. m1 <<= 2; m2 <<= 2;
  2253. ql += 32;
  2254. }
  2255. #else
  2256. float max_scale = 0, amax = 0;
  2257. for (int j = 0; j < QK_K/16; ++j) {
  2258. scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL);
  2259. float abs_scale = fabsf(scales[j]);
  2260. if (abs_scale > amax) {
  2261. amax = abs_scale;
  2262. max_scale = scales[j];
  2263. }
  2264. }
  2265. float iscale = -128.f/max_scale;
  2266. for (int j = 0; j < QK_K/16; ++j) {
  2267. int l = nearest_int(iscale*scales[j]);
  2268. y[i].scales[j] = MAX(-128, MIN(127, l));
  2269. }
  2270. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2271. for (int j = 0; j < QK_K/16; ++j) {
  2272. const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2273. if (!d) continue;
  2274. for (int ii = 0; ii < 16; ++ii) {
  2275. int l = nearest_int(x[16*j + ii]/d);
  2276. l = MAX(-16, MIN(15, l));
  2277. L[16*j + ii] = l + 16;
  2278. }
  2279. }
  2280. uint8_t * restrict qh = y[i].qh;
  2281. uint8_t * restrict ql = y[i].qs;
  2282. memset(qh, 0, QK_K/8);
  2283. for (int j = 0; j < 32; ++j) {
  2284. int jm = j%8;
  2285. int is = j/8;
  2286. int l1 = L[j];
  2287. if (l1 > 15) {
  2288. l1 -= 16; qh[jm] |= (1 << is);
  2289. }
  2290. int l2 = L[j + 32];
  2291. if (l2 > 15) {
  2292. l2 -= 16; qh[jm] |= (1 << (4 + is));
  2293. }
  2294. ql[j] = l1 | (l2 << 4);
  2295. }
  2296. #endif
  2297. x += QK_K;
  2298. }
  2299. }
  2300. void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
  2301. assert(k % QK_K == 0);
  2302. const int nb = k / QK_K;
  2303. for (int i = 0; i < nb; i++) {
  2304. const uint8_t * ql = x[i].qs;
  2305. const uint8_t * qh = x[i].qh;
  2306. #if QK_K == 256
  2307. const float d = GGML_FP16_TO_FP32(x[i].d);
  2308. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  2309. int is = 0;
  2310. uint8_t sc, m;
  2311. uint8_t u1 = 1, u2 = 2;
  2312. for (int j = 0; j < QK_K; j += 64) {
  2313. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  2314. const float d1 = d * sc; const float m1 = min * m;
  2315. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  2316. const float d2 = d * sc; const float m2 = min * m;
  2317. for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
  2318. for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
  2319. ql += 32; is += 2;
  2320. u1 <<= 2; u2 <<= 2;
  2321. }
  2322. #else
  2323. float d = GGML_FP16_TO_FP32(x[i].d);
  2324. const int8_t * restrict s = x[i].scales;
  2325. for (int l = 0; l < 8; ++l) {
  2326. y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
  2327. y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
  2328. y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
  2329. y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
  2330. y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
  2331. y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
  2332. y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
  2333. y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
  2334. }
  2335. y += QK_K;
  2336. #endif
  2337. }
  2338. }
  2339. void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
  2340. assert(k % QK_K == 0);
  2341. block_q5_K * restrict y = vy;
  2342. quantize_row_q5_K_reference(x, y, k);
  2343. }
  2344. size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
  2345. assert(k % QK_K == 0);
  2346. (void)hist; // TODO: collect histograms
  2347. for (int j = 0; j < n; j += k) {
  2348. block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
  2349. quantize_row_q5_K_reference(src + j, y, k);
  2350. }
  2351. return (n/QK_K*sizeof(block_q5_K));
  2352. }
  2353. static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int n_per_row, const float * quant_weights) {
  2354. #if QK_K != 256
  2355. (void)quant_weights;
  2356. quantize_row_q5_K_reference(x, y, n_per_row);
  2357. #else
  2358. assert(n_per_row % QK_K == 0);
  2359. const int nb = n_per_row / QK_K;
  2360. uint8_t L[QK_K];
  2361. uint8_t Laux[32];
  2362. uint8_t Ls[QK_K/32];
  2363. uint8_t Lm[QK_K/32];
  2364. float mins[QK_K/32];
  2365. float scales[QK_K/32];
  2366. float sw[QK_K/32];
  2367. float weights[32];
  2368. for (int i = 0; i < nb; i++) {
  2369. float sum_x2 = 0;
  2370. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2371. float sigma2 = 2*sum_x2/QK_K;
  2372. float av_x = sqrtf(sigma2);
  2373. for (int j = 0; j < QK_K/32; ++j) {
  2374. if (quant_weights) {
  2375. const float * qw = quant_weights + QK_K*i + 32*j;
  2376. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2377. } else {
  2378. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2379. }
  2380. float sumw = 0;
  2381. for (int l = 0; l < 32; ++l) sumw += weights[l];
  2382. sw[j] = sumw;
  2383. scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2384. }
  2385. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  2386. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  2387. for (int j = 0; j < QK_K/32; ++j) {
  2388. uint8_t ls = Ls[j];
  2389. uint8_t lm = Lm[j];
  2390. ls = MIN(63, ls);
  2391. lm = MIN(63, lm);
  2392. if (j < 4) {
  2393. y[i].scales[j] = ls;
  2394. y[i].scales[j+4] = lm;
  2395. } else {
  2396. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2397. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2398. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2399. }
  2400. }
  2401. y[i].d = GGML_FP32_TO_FP16(d_block);
  2402. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  2403. uint8_t sc, m;
  2404. for (int j = 0; j < QK_K/32; ++j) {
  2405. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2406. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2407. if (!d) continue;
  2408. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2409. for (int ii = 0; ii < 32; ++ii) {
  2410. int l = nearest_int((x[32*j + ii] + dm)/d);
  2411. l = MAX(0, MIN(31, l));
  2412. L[32*j + ii] = l;
  2413. }
  2414. }
  2415. uint8_t * restrict qh = y[i].qh;
  2416. uint8_t * restrict ql = y[i].qs;
  2417. memset(qh, 0, QK_K/8);
  2418. uint8_t m1 = 1, m2 = 2;
  2419. for (int n = 0; n < QK_K; n += 64) {
  2420. for (int j = 0; j < 32; ++j) {
  2421. int l1 = L[n + j];
  2422. if (l1 > 15) {
  2423. l1 -= 16; qh[j] |= m1;
  2424. }
  2425. int l2 = L[n + j + 32];
  2426. if (l2 > 15) {
  2427. l2 -= 16; qh[j] |= m2;
  2428. }
  2429. ql[j] = l1 | (l2 << 4);
  2430. }
  2431. m1 <<= 2; m2 <<= 2;
  2432. ql += 32;
  2433. }
  2434. x += QK_K;
  2435. }
  2436. #endif
  2437. }
  2438. size_t quantize_q5_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2439. (void)hist;
  2440. size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
  2441. if (!quant_weights) {
  2442. quantize_row_q5_K_reference(src, dst, nrow*n_per_row);
  2443. }
  2444. else {
  2445. char * qrow = (char *)dst;
  2446. for (int row = 0; row < nrow; ++row) {
  2447. quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
  2448. src += n_per_row;
  2449. qrow += row_size;
  2450. }
  2451. }
  2452. return nrow * row_size;
  2453. }
  2454. // ====================== 6-bit (de)-quantization
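// Layout used below for one q6_K super-block (QK_K == 256):
//   ql[QK_K/2]      - low 4 bits of the quants
//   qh[QK_K/4]      - upper 2 bits of the quants
//   scales[QK_K/16] - per-sub-block scales as int8
//   d               - fp16 super-block scale
// A dequantized value is d * scale * (q - 32) with the 6-bit quant q in 0..63.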
  2455. void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
  2456. assert(k % QK_K == 0);
  2457. const int nb = k / QK_K;
  2458. int8_t L[QK_K];
  2459. float scales[QK_K/16];
  2460. for (int i = 0; i < nb; i++) {
  2461. float max_scale = 0;
  2462. float max_abs_scale = 0;
  2463. for (int ib = 0; ib < QK_K/16; ++ib) {
  2464. const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2465. scales[ib] = scale;
  2466. const float abs_scale = fabsf(scale);
  2467. if (abs_scale > max_abs_scale) {
  2468. max_abs_scale = abs_scale;
  2469. max_scale = scale;
  2470. }
  2471. }
  2472. if (!max_abs_scale) {
  2473. memset(&y[i], 0, sizeof(block_q6_K));
  2474. y[i].d = GGML_FP32_TO_FP16(0.f);
  2475. x += QK_K;
  2476. continue;
  2477. }
  2478. float iscale = -128.f/max_scale;
  2479. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2480. for (int ib = 0; ib < QK_K/16; ++ib) {
  2481. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2482. }
  2483. for (int j = 0; j < QK_K/16; ++j) {
  2484. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2485. if (!d) {
  2486. continue;
  2487. }
  2488. for (int ii = 0; ii < 16; ++ii) {
  2489. int l = nearest_int(x[16*j + ii]/d);
  2490. l = MAX(-32, MIN(31, l));
  2491. L[16*j + ii] = l + 32;
  2492. }
  2493. }
  2494. uint8_t * restrict ql = y[i].ql;
  2495. uint8_t * restrict qh = y[i].qh;
  2496. #if QK_K == 256
  2497. for (int j = 0; j < QK_K; j += 128) {
  2498. for (int l = 0; l < 32; ++l) {
  2499. const uint8_t q1 = L[j + l + 0] & 0xF;
  2500. const uint8_t q2 = L[j + l + 32] & 0xF;
  2501. const uint8_t q3 = L[j + l + 64] & 0xF;
  2502. const uint8_t q4 = L[j + l + 96] & 0xF;
  2503. ql[l+ 0] = q1 | (q3 << 4);
  2504. ql[l+32] = q2 | (q4 << 4);
  2505. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2506. }
  2507. ql += 64;
  2508. qh += 32;
  2509. }
  2510. #else
  2511. for (int l = 0; l < 32; ++l) {
  2512. const uint8_t q1 = L[l + 0] & 0xF;
  2513. const uint8_t q2 = L[l + 32] & 0xF;
  2514. ql[l] = q1 | (q2 << 4);
  2515. }
  2516. for (int l = 0; l < 16; ++l) {
  2517. qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
  2518. }
  2519. #endif
  2520. x += QK_K;
  2521. }
  2522. }
  2523. void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
  2524. assert(k % QK_K == 0);
  2525. const int nb = k / QK_K;
  2526. for (int i = 0; i < nb; i++) {
  2527. const float d = GGML_FP16_TO_FP32(x[i].d);
  2528. const uint8_t * restrict ql = x[i].ql;
  2529. const uint8_t * restrict qh = x[i].qh;
  2530. const int8_t * restrict sc = x[i].scales;
  2531. #if QK_K == 256
  2532. for (int n = 0; n < QK_K; n += 128) {
  2533. for (int l = 0; l < 32; ++l) {
  2534. int is = l/16;
  2535. const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2536. const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2537. const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2538. const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2539. y[l + 0] = d * sc[is + 0] * q1;
  2540. y[l + 32] = d * sc[is + 2] * q2;
  2541. y[l + 64] = d * sc[is + 4] * q3;
  2542. y[l + 96] = d * sc[is + 6] * q4;
  2543. }
  2544. y += 128;
  2545. ql += 64;
  2546. qh += 32;
  2547. sc += 8;
  2548. }
  2549. #else
  2550. for (int l = 0; l < 16; ++l) {
  2551. const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2552. const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2553. const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2554. const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2555. y[l+ 0] = d * sc[0] * q1;
  2556. y[l+16] = d * sc[1] * q2;
  2557. y[l+32] = d * sc[2] * q3;
  2558. y[l+48] = d * sc[3] * q4;
  2559. }
  2560. y += 64;
  2561. #endif
  2562. }
  2563. }
  2564. void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
  2565. assert(k % QK_K == 0);
  2566. block_q6_K * restrict y = vy;
  2567. quantize_row_q6_K_reference(x, y, k);
  2568. }
  2569. size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
  2570. assert(k % QK_K == 0);
  2571. (void)hist; // TODO: collect histograms
  2572. for (int j = 0; j < n; j += k) {
  2573. block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
  2574. quantize_row_q6_K_reference(src + j, y, k);
  2575. }
  2576. return (n/QK_K*sizeof(block_q6_K));
  2577. }
  2578. static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int n_per_row, const float * quant_weights) {
  2579. #if QK_K != 256
  2580. (void)quant_weights;
  2581. quantize_row_q6_K_reference(x, y, n_per_row);
  2582. #else
  2583. assert(n_per_row % QK_K == 0);
  2584. const int nb = n_per_row / QK_K;
  2585. int8_t L[QK_K];
  2586. float scales[QK_K/16];
  2587. //float weights[16];
  2588. for (int i = 0; i < nb; i++) {
  2589. //float sum_x2 = 0;
  2590. //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
  2591. //float sigma2 = sum_x2/QK_K;
  2592. float max_scale = 0;
  2593. float max_abs_scale = 0;
  2594. for (int ib = 0; ib < QK_K/16; ++ib) {
  2595. float scale;
  2596. if (quant_weights) {
  2597. const float * qw = quant_weights + QK_K*i + 16*ib;
  2598. //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
  2599. //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
  2600. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
  2601. } else {
  2602. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2603. }
  2604. scales[ib] = scale;
  2605. const float abs_scale = fabsf(scale);
  2606. if (abs_scale > max_abs_scale) {
  2607. max_abs_scale = abs_scale;
  2608. max_scale = scale;
  2609. }
  2610. }
  2611. if (!max_abs_scale) {
  2612. memset(&y[i], 0, sizeof(block_q6_K));
  2613. y[i].d = GGML_FP32_TO_FP16(0.f);
  2614. x += QK_K;
  2615. continue;
  2616. }
  2617. float iscale = -128.f/max_scale;
  2618. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2619. for (int ib = 0; ib < QK_K/16; ++ib) {
  2620. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2621. }
  2622. for (int j = 0; j < QK_K/16; ++j) {
  2623. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2624. if (!d) {
  2625. continue;
  2626. }
  2627. for (int ii = 0; ii < 16; ++ii) {
  2628. int l = nearest_int(x[16*j + ii]/d);
  2629. l = MAX(-32, MIN(31, l));
  2630. L[16*j + ii] = l + 32;
  2631. }
  2632. }
  2633. uint8_t * restrict ql = y[i].ql;
  2634. uint8_t * restrict qh = y[i].qh;
  2635. for (int j = 0; j < QK_K; j += 128) {
  2636. for (int l = 0; l < 32; ++l) {
  2637. const uint8_t q1 = L[j + l + 0] & 0xF;
  2638. const uint8_t q2 = L[j + l + 32] & 0xF;
  2639. const uint8_t q3 = L[j + l + 64] & 0xF;
  2640. const uint8_t q4 = L[j + l + 96] & 0xF;
  2641. ql[l+ 0] = q1 | (q3 << 4);
  2642. ql[l+32] = q2 | (q4 << 4);
  2643. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2644. }
  2645. ql += 64;
  2646. qh += 32;
  2647. }
  2648. x += QK_K;
  2649. }
  2650. #endif
  2651. }
  2652. size_t quantize_q6_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2653. (void)hist;
  2654. size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
  2655. if (!quant_weights) {
  2656. quantize_row_q6_K_reference(src, dst, nrow*n_per_row);
  2657. }
  2658. else {
  2659. char * qrow = (char *)dst;
  2660. for (int row = 0; row < nrow; ++row) {
  2661. quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
  2662. src += n_per_row;
  2663. qrow += row_size;
  2664. }
  2665. }
  2666. return nrow * row_size;
  2667. }
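// Importance-weighted quantization for the legacy formats (q4_0/q4_1/q5_0/q5_1): when
// quant_weights is provided (typically derived from activation statistics), each value is
// weighted by qw[j]*sqrt(sigma2 + x[j]^2) with sigma2 the mean square of the row, and the
// per-block scale (and min) is found by make_qx_quants / make_qkx3_quants; otherwise the
// plain reference quantizers are used.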
  2668. static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int n_per_row, const float * quant_weights) {
  2669. static_assert(QK4_0 == 32, "QK4_0 must be 32");
  2670. if (!quant_weights) {
  2671. quantize_row_q4_0_reference(x, y, n_per_row);
  2672. return;
  2673. }
  2674. float weight[QK4_0];
  2675. int8_t L[QK4_0];
  2676. float sum_x2 = 0;
  2677. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2678. float sigma2 = sum_x2/n_per_row;
  2679. const int nb = n_per_row/QK4_0;
  2680. for (int ib = 0; ib < nb; ++ib) {
  2681. const float * xb = x + QK4_0 * ib;
  2682. const float * qw = quant_weights + QK4_0 * ib;
  2683. for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2684. float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
  2685. y[ib].d = GGML_FP32_TO_FP16(d);
  2686. for (int j = 0; j < 16; ++j) {
  2687. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2688. }
  2689. }
  2690. }
  2691. size_t quantize_q4_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2692. if (!quant_weights) {
  2693. return ggml_quantize_q4_0(src, dst, nrow*n_per_row, n_per_row, hist);
  2694. }
  2695. size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  2696. char * qrow = (char *)dst;
  2697. for (int row = 0; row < nrow; ++row) {
  2698. quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
  2699. src += n_per_row;
  2700. qrow += row_size;
  2701. }
  2702. return nrow * row_size;
  2703. }
  2704. static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int n_per_row, const float * quant_weights) {
  2705. static_assert(QK4_1 == 32, "QK4_1 must be 32");
  2706. if (!quant_weights) {
  2707. quantize_row_q4_1_reference(x, y, n_per_row);
  2708. return;
  2709. }
  2710. float weight[QK4_1];
  2711. uint8_t L[QK4_1], Laux[QK4_1];
  2712. float sum_x2 = 0;
  2713. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2714. float sigma2 = sum_x2/n_per_row;
  2715. const int nb = n_per_row/QK4_1;
  2716. for (int ib = 0; ib < nb; ++ib) {
  2717. const float * xb = x + QK4_1 * ib;
  2718. const float * qw = quant_weights + QK4_1 * ib;
  2719. for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2720. float min;
  2721. float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2722. y[ib].d = GGML_FP32_TO_FP16(d);
  2723. y[ib].m = GGML_FP32_TO_FP16(-min);
  2724. for (int j = 0; j < 16; ++j) {
  2725. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2726. }
  2727. }
  2728. }
  2729. size_t quantize_q4_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2730. if (!quant_weights) {
  2731. return ggml_quantize_q4_1(src, dst, nrow*n_per_row, n_per_row, hist);
  2732. }
  2733. size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  2734. char * qrow = (char *)dst;
  2735. for (int row = 0; row < nrow; ++row) {
  2736. quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
  2737. src += n_per_row;
  2738. qrow += row_size;
  2739. }
  2740. return nrow * row_size;
  2741. }
  2742. static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int n_per_row, const float * quant_weights) {
  2743. static_assert(QK5_0 == 32, "QK5_0 must be 32");
  2744. if (!quant_weights) {
  2745. quantize_row_q5_0_reference(x, y, n_per_row);
  2746. return;
  2747. }
  2748. float weight[QK5_0];
  2749. int8_t L[QK5_0];
  2750. float sum_x2 = 0;
  2751. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2752. float sigma2 = sum_x2/n_per_row;
  2753. const int nb = n_per_row/QK5_0;
  2754. for (int ib = 0; ib < nb; ++ib) {
  2755. const float * xb = x + QK5_0 * ib;
  2756. const float * qw = quant_weights + QK5_0 * ib;
  2757. for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2758. float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
  2759. y[ib].d = GGML_FP32_TO_FP16(d);
  2760. uint32_t qh = 0;
  2761. for (int j = 0; j < 16; ++j) {
  2762. const uint8_t xi0 = L[j];
  2763. const uint8_t xi1 = L[j+16];
  2764. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  2765. // get the 5-th bit and store it in qh at the right position
  2766. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  2767. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
  2768. }
  2769. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2770. }
  2771. }
  2772. size_t quantize_q5_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2773. if (!quant_weights) {
  2774. return ggml_quantize_q5_0(src, dst, nrow*n_per_row, n_per_row, hist);
  2775. }
  2776. size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  2777. char * qrow = (char *)dst;
  2778. for (int row = 0; row < nrow; ++row) {
  2779. quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
  2780. src += n_per_row;
  2781. qrow += row_size;
  2782. }
  2783. return nrow * row_size;
  2784. }
  2785. static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int n_per_row, const float * quant_weights) {
  2786. static_assert(QK5_1 == 32, "QK5_1 must be 32");
  2787. if (!quant_weights) {
  2788. quantize_row_q5_1_reference(x, y, n_per_row);
  2789. return;
  2790. }
  2791. float weight[QK5_1];
  2792. uint8_t L[QK5_1], Laux[QK5_1];
  2793. float sum_x2 = 0;
  2794. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2795. float sigma2 = sum_x2/n_per_row;
  2796. const int nb = n_per_row/QK5_1;
  2797. for (int ib = 0; ib < nb; ++ib) {
  2798. const float * xb = x + QK5_1 * ib;
  2799. const float * qw = quant_weights + QK5_1 * ib;
  2800. for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2801. float min;
  2802. float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2803. y[ib].d = GGML_FP32_TO_FP16(d);
  2804. y[ib].m = GGML_FP32_TO_FP16(-min);
  2805. uint32_t qh = 0;
  2806. for (int j = 0; j < 16; ++j) {
  2807. const uint8_t xi0 = L[j];
  2808. const uint8_t xi1 = L[j+16];
  2809. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  2810. // get the 5-th bit and store it in qh at the right position
  2811. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
  2813. }
  2814. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2815. }
  2816. }
  2817. size_t quantize_q5_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  2818. if (!quant_weights) {
  2819. return ggml_quantize_q5_1(src, dst, nrow*n_per_row, n_per_row, hist);
  2820. }
  2821. size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  2822. char * qrow = (char *)dst;
  2823. for (int row = 0; row < nrow; ++row) {
  2824. quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
  2825. src += n_per_row;
  2826. qrow += row_size;
  2827. }
  2828. return nrow * row_size;
  2829. }
  2830. // ====================== "True" 2-bit (de)-quantization
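// iq2xxs_grid: the 256-entry codebook used by the IQ2_XXS format. Each 64-bit entry packs
// eight bytes, each taken from {0x08, 0x19, 0x2b}, i.e. a vector of 8 quantized magnitudes
// with three possible levels; sign information is stored separately in the block data.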
  2831. static const uint64_t iq2xxs_grid[256] = {
  2832. 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
  2833. 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808,
  2834. 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819,
  2835. 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819,
  2836. 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b,
  2837. 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808,
  2838. 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08,
  2839. 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b,
  2840. 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819,
  2841. 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08,
  2842. 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808,
  2843. 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08,
  2844. 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808,
  2845. 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808,
  2846. 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919,
  2847. 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819,
  2848. 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08,
  2849. 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908,
  2850. 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819,
  2851. 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808,
  2852. 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808,
  2853. 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908,
  2854. 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808,
  2855. 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08,
  2856. 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819,
  2857. 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819,
  2858. 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819,
  2859. 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908,
  2860. 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19,
  2861. 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819,
  2862. 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b,
  2863. 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808,
  2864. 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908,
  2865. 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08,
  2866. 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08,
  2867. 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908,
  2868. 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819,
  2869. 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808,
  2870. 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808,
  2871. 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19,
  2872. 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819,
  2873. 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919,
  2874. 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b,
  2875. 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08,
  2876. 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808,
  2877. 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908,
  2878. 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b,
  2879. 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819,
  2880. 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08,
  2881. 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08,
  2882. 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808,
  2883. 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b,
  2884. 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b,
  2885. 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908,
  2886. 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819,
  2887. 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808,
  2888. 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908,
  2889. 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b,
  2890. 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808,
  2891. 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b,
  2892. 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b,
  2893. 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808,
  2894. 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19,
  2895. 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908,
  2896. };
  2897. static const uint64_t iq2xs_grid[512] = {
  2898. 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
  2899. 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
  2900. 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
  2901. 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
  2902. 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
  2903. 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808,
  2904. 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819,
  2905. 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819,
  2906. 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808,
  2907. 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b,
  2908. 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b,
  2909. 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908,
  2910. 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908,
  2911. 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919,
  2912. 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808,
  2913. 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919,
  2914. 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908,
  2915. 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b,
  2916. 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908,
  2917. 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08,
  2918. 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808,
  2919. 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808,
  2920. 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819,
  2921. 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908,
  2922. 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819,
  2923. 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808,
  2924. 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b,
  2925. 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819,
  2926. 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819,
  2927. 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808,
  2928. 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908,
  2929. 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19,
  2930. 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b,
  2931. 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b,
  2932. 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919,
  2933. 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808,
  2934. 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819,
  2935. 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819,
  2936. 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b,
  2937. 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908,
  2938. 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808,
  2939. 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819,
  2940. 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808,
  2941. 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919,
  2942. 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808,
  2943. 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808,
  2944. 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908,
  2945. 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908,
  2946. 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808,
  2947. 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b,
  2948. 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819,
  2949. 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919,
  2950. 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908,
  2951. 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808,
  2952. 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908,
  2953. 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919,
  2954. 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08,
  2955. 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19,
  2956. 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b,
  2957. 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b,
  2958. 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808,
  2959. 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08,
  2960. 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b,
  2961. 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908,
  2962. 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b,
  2963. 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908,
  2964. 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08,
  2965. 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808,
  2966. 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808,
  2967. 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08,
  2968. 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819,
  2969. 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919,
  2970. 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808,
  2971. 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808,
  2972. 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819,
  2973. 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819,
  2974. 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908,
  2975. 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908,
  2976. 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b,
  2977. 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908,
  2978. 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908,
  2979. 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908,
  2980. 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808,
  2981. 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819,
  2982. 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819,
  2983. 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819,
  2984. 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808,
  2985. 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b,
  2986. 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819,
  2987. 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819,
  2988. 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08,
  2989. 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808,
  2990. 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19,
  2991. 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919,
  2992. 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808,
  2993. 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19,
  2994. 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b,
  2995. 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808,
  2996. 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b,
  2997. 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b,
  2998. 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08,
  2999. 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b,
  3000. 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808,
  3001. 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819,
  3002. 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808,
  3003. 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808,
  3004. 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08,
  3005. 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b,
  3006. 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19,
  3007. 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08,
  3008. 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919,
  3009. 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08,
  3010. 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08,
  3011. 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908,
  3012. 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908,
  3013. 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b,
  3014. 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908,
  3015. 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808,
  3016. 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b,
  3017. 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808,
  3018. 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808,
  3019. 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19,
  3020. 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08,
  3021. 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808,
  3022. 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b,
  3023. 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808,
  3024. 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b,
  3025. 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b,
  3026. };
  3027. static const uint64_t iq2s_grid[1024] = {
  3028. 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
  3029. 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
  3030. 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
  3031. 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
  3032. 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
  3033. 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x08080808192b192b,
  3034. 0x08080808192b2b19, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919,
  3035. 0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b2b0808,
  3036. 0x080808082b2b1919, 0x080808082b2b2b2b, 0x0808081908080819, 0x0808081908081908,
  3037. 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b,
  3038. 0x0808081908191919, 0x0808081908192b08, 0x08080819082b0819, 0x08080819082b1908,
  3039. 0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08,
  3040. 0x0808081919190819, 0x0808081919191908, 0x080808191919192b, 0x0808081919192b19,
  3041. 0x08080819192b0808, 0x08080819192b1919, 0x08080819192b2b08, 0x080808192b080819,
  3042. 0x080808192b081908, 0x080808192b190808, 0x080808192b19082b, 0x080808192b191919,
  3043. 0x080808192b2b0819, 0x080808192b2b1908, 0x0808082b08080808, 0x0808082b0808082b,
  3044. 0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908,
  3045. 0x0808082b082b0808, 0x0808082b082b2b2b, 0x0808082b19080819, 0x0808082b19081908,
  3046. 0x0808082b1908192b, 0x0808082b19082b19, 0x0808082b19190808, 0x0808082b19191919,
  3047. 0x0808082b2b080808, 0x0808082b2b081919, 0x0808082b2b082b2b, 0x0808082b2b191908,
  3048. 0x0808082b2b2b082b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b,
  3049. 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919,
  3050. 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x08081908082b192b,
  3051. 0x08081908082b2b19, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919,
  3052. 0x0808190819082b08, 0x0808190819082b2b, 0x0808190819190819, 0x0808190819191908,
  3053. 0x080819081919192b, 0x0808190819192b19, 0x08081908192b0808, 0x08081908192b082b,
  3054. 0x08081908192b1919, 0x080819082b080819, 0x080819082b081908, 0x080819082b08192b,
  3055. 0x080819082b082b19, 0x080819082b190808, 0x080819082b191919, 0x080819082b192b08,
  3056. 0x080819082b2b0819, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b,
  3057. 0x0808191908081919, 0x0808191908082b08, 0x0808191908082b2b, 0x0808191908190819,
  3058. 0x0808191908191908, 0x080819190819192b, 0x0808191908192b19, 0x08081919082b0808,
  3059. 0x08081919082b1919, 0x08081919082b2b08, 0x0808191919080819, 0x0808191919081908,
  3060. 0x080819191908192b, 0x0808191919082b19, 0x0808191919190808, 0x080819191919082b,
  3061. 0x0808191919191919, 0x0808191919192b08, 0x08081919192b0819, 0x08081919192b1908,
  3062. 0x080819192b080808, 0x080819192b08082b, 0x080819192b081919, 0x080819192b082b08,
  3063. 0x080819192b190819, 0x080819192b191908, 0x080819192b2b0808, 0x0808192b08080819,
  3064. 0x0808192b08081908, 0x0808192b0808192b, 0x0808192b08082b19, 0x0808192b08190808,
  3065. 0x0808192b08191919, 0x0808192b19080808, 0x0808192b19081919, 0x0808192b19082b08,
  3066. 0x0808192b19190819, 0x0808192b19191908, 0x0808192b192b0808, 0x0808192b2b080819,
  3067. 0x0808192b2b081908, 0x0808192b2b190808, 0x08082b0808080808, 0x08082b080808082b,
  3068. 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808190819, 0x08082b0808191908,
  3069. 0x08082b080819192b, 0x08082b0808192b19, 0x08082b08082b0808, 0x08082b08082b1919,
  3070. 0x08082b08082b2b2b, 0x08082b0819080819, 0x08082b0819081908, 0x08082b081908192b,
  3071. 0x08082b0819082b19, 0x08082b0819190808, 0x08082b081919082b, 0x08082b0819191919,
  3072. 0x08082b0819192b08, 0x08082b08192b0819, 0x08082b08192b1908, 0x08082b082b080808,
  3073. 0x08082b082b081919, 0x08082b082b191908, 0x08082b082b2b2b2b, 0x08082b1908080819,
  3074. 0x08082b1908081908, 0x08082b1908190808, 0x08082b190819082b, 0x08082b1908191919,
  3075. 0x08082b1908192b08, 0x08082b19082b0819, 0x08082b1919080808, 0x08082b1919081919,
  3076. 0x08082b1919082b08, 0x08082b1919190819, 0x08082b1919191908, 0x08082b19192b0808,
  3077. 0x08082b192b080819, 0x08082b192b190808, 0x08082b2b08080808, 0x08082b2b08190819,
  3078. 0x08082b2b08191908, 0x08082b2b082b082b, 0x08082b2b082b2b08, 0x08082b2b082b2b2b,
  3079. 0x08082b2b19190808, 0x08082b2b2b192b19, 0x0819080808080819, 0x0819080808081908,
  3080. 0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b,
  3081. 0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908,
  3082. 0x08190808082b192b, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919,
  3083. 0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x081908081919192b,
  3084. 0x0819080819192b19, 0x08190808192b0808, 0x08190808192b082b, 0x08190808192b1919,
  3085. 0x08190808192b2b08, 0x081908082b080819, 0x081908082b081908, 0x081908082b08192b,
  3086. 0x081908082b190808, 0x081908082b191919, 0x081908082b192b08, 0x081908082b2b0819,
  3087. 0x081908082b2b1908, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919,
  3088. 0x0819081908082b08, 0x0819081908082b2b, 0x0819081908190819, 0x0819081908191908,
  3089. 0x081908190819192b, 0x0819081908192b19, 0x08190819082b0808, 0x08190819082b082b,
  3090. 0x08190819082b1919, 0x08190819082b2b08, 0x0819081919080819, 0x0819081919081908,
  3091. 0x081908191908192b, 0x0819081919082b19, 0x0819081919190808, 0x081908191919082b,
  3092. 0x0819081919191919, 0x0819081919192b08, 0x08190819192b0819, 0x08190819192b1908,
  3093. 0x081908192b080808, 0x081908192b08082b, 0x081908192b081919, 0x081908192b082b08,
  3094. 0x081908192b190819, 0x081908192b191908, 0x0819082b08080819, 0x0819082b08081908,
  3095. 0x0819082b08082b19, 0x0819082b08190808, 0x0819082b08191919, 0x0819082b082b0819,
  3096. 0x0819082b082b1908, 0x0819082b19080808, 0x0819082b19081919, 0x0819082b19190819,
  3097. 0x0819082b19191908, 0x0819082b2b080819, 0x0819082b2b081908, 0x0819082b2b190808,
  3098. 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08,
  3099. 0x0819190808190819, 0x0819190808191908, 0x081919080819192b, 0x0819190808192b19,
  3100. 0x08191908082b0808, 0x08191908082b1919, 0x08191908082b2b08, 0x0819190819080819,
  3101. 0x0819190819081908, 0x081919081908192b, 0x0819190819082b19, 0x0819190819190808,
  3102. 0x081919081919082b, 0x0819190819191919, 0x0819190819192b08, 0x08191908192b0819,
  3103. 0x08191908192b1908, 0x081919082b080808, 0x081919082b08082b, 0x081919082b081919,
  3104. 0x081919082b082b08, 0x081919082b190819, 0x081919082b191908, 0x081919082b2b0808,
  3105. 0x0819191908080819, 0x0819191908081908, 0x081919190808192b, 0x0819191908082b19,
  3106. 0x0819191908190808, 0x081919190819082b, 0x0819191908191919, 0x0819191908192b08,
  3107. 0x08191919082b0819, 0x08191919082b1908, 0x0819191919080808, 0x081919191908082b,
  3108. 0x0819191919081919, 0x0819191919082b08, 0x0819191919190819, 0x0819191919191908,
  3109. 0x08191919192b0808, 0x081919192b080819, 0x081919192b081908, 0x081919192b190808,
  3110. 0x0819192b08080808, 0x0819192b08081919, 0x0819192b08082b08, 0x0819192b08190819,
  3111. 0x0819192b08191908, 0x0819192b082b0808, 0x0819192b19080819, 0x0819192b19081908,
  3112. 0x0819192b19190808, 0x0819192b2b080808, 0x0819192b2b2b2b2b, 0x08192b0808080819,
  3113. 0x08192b0808081908, 0x08192b080808192b, 0x08192b0808082b19, 0x08192b0808190808,
  3114. 0x08192b0808191919, 0x08192b0808192b08, 0x08192b08082b0819, 0x08192b0819080808,
  3115. 0x08192b081908082b, 0x08192b0819081919, 0x08192b0819082b08, 0x08192b0819190819,
  3116. 0x08192b0819191908, 0x08192b08192b0808, 0x08192b082b080819, 0x08192b082b081908,
  3117. 0x08192b1908080808, 0x08192b190808082b, 0x08192b1908081919, 0x08192b1908082b08,
  3118. 0x08192b1908190819, 0x08192b1908191908, 0x08192b19082b0808, 0x08192b1919080819,
  3119. 0x08192b1919081908, 0x08192b1919190808, 0x08192b19192b2b19, 0x08192b192b2b082b,
  3120. 0x08192b2b08081908, 0x08192b2b08190808, 0x08192b2b19080808, 0x08192b2b1919192b,
  3121. 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08,
  3122. 0x082b080808190819, 0x082b080808191908, 0x082b08080819192b, 0x082b080808192b19,
  3123. 0x082b0808082b0808, 0x082b0808082b1919, 0x082b0808082b2b2b, 0x082b080819080819,
  3124. 0x082b080819081908, 0x082b080819190808, 0x082b08081919082b, 0x082b080819191919,
  3125. 0x082b0808192b1908, 0x082b08082b080808, 0x082b08082b082b2b, 0x082b08082b191908,
  3126. 0x082b08082b2b2b2b, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808,
  3127. 0x082b08190819082b, 0x082b081908191919, 0x082b0819082b0819, 0x082b081919080808,
  3128. 0x082b08191908082b, 0x082b081919081919, 0x082b081919190819, 0x082b081919191908,
  3129. 0x082b0819192b0808, 0x082b08192b080819, 0x082b08192b081908, 0x082b08192b190808,
  3130. 0x082b082b08080808, 0x082b082b08082b2b, 0x082b082b082b082b, 0x082b082b082b2b08,
  3131. 0x082b082b082b2b2b, 0x082b082b19081908, 0x082b082b19190808, 0x082b082b2b082b08,
  3132. 0x082b082b2b082b2b, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908,
  3133. 0x082b19080808192b, 0x082b190808082b19, 0x082b190808190808, 0x082b190808191919,
  3134. 0x082b190808192b08, 0x082b1908082b0819, 0x082b1908082b1908, 0x082b190819080808,
  3135. 0x082b19081908082b, 0x082b190819081919, 0x082b190819082b08, 0x082b190819190819,
  3136. 0x082b190819191908, 0x082b1908192b0808, 0x082b19082b080819, 0x082b19082b081908,
  3137. 0x082b19082b190808, 0x082b191908080808, 0x082b191908081919, 0x082b191908082b08,
  3138. 0x082b191908190819, 0x082b191908191908, 0x082b1919082b0808, 0x082b191919080819,
  3139. 0x082b191919081908, 0x082b191919190808, 0x082b1919192b192b, 0x082b19192b080808,
  3140. 0x082b192b08080819, 0x082b192b08081908, 0x082b192b08190808, 0x082b192b19080808,
  3141. 0x082b192b19192b19, 0x082b2b0808080808, 0x082b2b0808081919, 0x082b2b0808190819,
  3142. 0x082b2b0808191908, 0x082b2b0819080819, 0x082b2b0819081908, 0x082b2b0819190808,
  3143. 0x082b2b082b082b2b, 0x082b2b082b2b2b2b, 0x082b2b1908080819, 0x082b2b1908081908,
  3144. 0x082b2b1908190808, 0x082b2b192b191919, 0x082b2b2b08082b2b, 0x082b2b2b082b082b,
  3145. 0x082b2b2b192b1908, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819,
  3146. 0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808,
  3147. 0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x1908080808192b2b,
  3148. 0x19080808082b0819, 0x19080808082b1908, 0x19080808082b192b, 0x1908080819080808,
  3149. 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b,
  3150. 0x1908080819190819, 0x1908080819191908, 0x190808081919192b, 0x1908080819192b19,
  3151. 0x19080808192b0808, 0x19080808192b082b, 0x19080808192b1919, 0x190808082b080819,
  3152. 0x190808082b081908, 0x190808082b190808, 0x190808082b191919, 0x190808082b192b08,
  3153. 0x190808082b2b0819, 0x190808082b2b1908, 0x1908081908080808, 0x190808190808082b,
  3154. 0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908,
  3155. 0x190808190819192b, 0x1908081908192b19, 0x19080819082b0808, 0x19080819082b082b,
  3156. 0x19080819082b1919, 0x1908081919080819, 0x1908081919081908, 0x190808191908192b,
  3157. 0x1908081919082b19, 0x1908081919190808, 0x190808191919082b, 0x1908081919191919,
  3158. 0x1908081919192b08, 0x19080819192b0819, 0x19080819192b1908, 0x190808192b080808,
  3159. 0x190808192b08082b, 0x190808192b081919, 0x190808192b082b08, 0x190808192b190819,
  3160. 0x190808192b191908, 0x190808192b2b0808, 0x1908082b08080819, 0x1908082b08081908,
  3161. 0x1908082b08190808, 0x1908082b0819082b, 0x1908082b08191919, 0x1908082b08192b08,
  3162. 0x1908082b082b1908, 0x1908082b19080808, 0x1908082b19081919, 0x1908082b19082b08,
  3163. 0x1908082b19190819, 0x1908082b19191908, 0x1908082b192b0808, 0x1908082b2b080819,
  3164. 0x1908082b2b081908, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919,
  3165. 0x1908190808082b08, 0x1908190808082b2b, 0x1908190808190819, 0x1908190808191908,
  3166. 0x190819080819192b, 0x1908190808192b19, 0x19081908082b0808, 0x19081908082b082b,
  3167. 0x19081908082b1919, 0x19081908082b2b08, 0x1908190819080819, 0x1908190819081908,
  3168. 0x190819081908192b, 0x1908190819082b19, 0x1908190819190808, 0x190819081919082b,
  3169. 0x1908190819191919, 0x1908190819192b08, 0x19081908192b0819, 0x19081908192b1908,
  3170. 0x190819082b080808, 0x190819082b08082b, 0x190819082b081919, 0x190819082b082b08,
  3171. 0x190819082b190819, 0x190819082b191908, 0x190819082b2b0808, 0x1908191908080819,
  3172. 0x1908191908081908, 0x190819190808192b, 0x1908191908082b19, 0x1908191908190808,
  3173. 0x190819190819082b, 0x1908191908191919, 0x1908191908192b08, 0x19081919082b0819,
  3174. 0x19081919082b1908, 0x1908191919080808, 0x190819191908082b, 0x1908191919081919,
  3175. 0x1908191919082b08, 0x1908191919190819, 0x1908191919191908, 0x19081919192b0808,
  3176. 0x19081919192b2b2b, 0x190819192b080819, 0x190819192b081908, 0x190819192b190808,
  3177. 0x1908192b08080808, 0x1908192b0808082b, 0x1908192b08081919, 0x1908192b08082b08,
  3178. 0x1908192b08190819, 0x1908192b08191908, 0x1908192b082b0808, 0x1908192b19080819,
  3179. 0x1908192b19081908, 0x1908192b19190808, 0x1908192b2b080808, 0x1908192b2b2b1919,
  3180. 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808082b19, 0x19082b0808190808,
  3181. 0x19082b080819082b, 0x19082b0808191919, 0x19082b0808192b08, 0x19082b08082b0819,
  3182. 0x19082b08082b1908, 0x19082b0819080808, 0x19082b081908082b, 0x19082b0819081919,
  3183. 0x19082b0819082b08, 0x19082b0819190819, 0x19082b0819191908, 0x19082b08192b0808,
  3184. 0x19082b082b081908, 0x19082b082b190808, 0x19082b1908080808, 0x19082b190808082b,
  3185. 0x19082b1908081919, 0x19082b1908082b08, 0x19082b1908190819, 0x19082b1908191908,
  3186. 0x19082b19082b0808, 0x19082b1919080819, 0x19082b1919081908, 0x19082b1919190808,
  3187. 0x19082b192b080808, 0x19082b192b19192b, 0x19082b2b08080819, 0x19082b2b08081908,
  3188. 0x19082b2b08190808, 0x19082b2b19080808, 0x1919080808080808, 0x191908080808082b,
  3189. 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908,
  3190. 0x191908080819192b, 0x1919080808192b19, 0x19190808082b0808, 0x19190808082b082b,
  3191. 0x19190808082b1919, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908,
  3192. 0x191908081908192b, 0x1919080819082b19, 0x1919080819190808, 0x191908081919082b,
  3193. 0x1919080819191919, 0x1919080819192b08, 0x19190808192b0819, 0x19190808192b1908,
  3194. 0x191908082b080808, 0x191908082b08082b, 0x191908082b081919, 0x191908082b082b08,
  3195. 0x191908082b190819, 0x191908082b191908, 0x1919081908080819, 0x1919081908081908,
  3196. 0x191908190808192b, 0x1919081908082b19, 0x1919081908190808, 0x191908190819082b,
  3197. 0x1919081908191919, 0x1919081908192b08, 0x19190819082b0819, 0x19190819082b1908,
  3198. 0x1919081919080808, 0x191908191908082b, 0x1919081919081919, 0x1919081919082b08,
  3199. 0x1919081919190819, 0x1919081919191908, 0x19190819192b0808, 0x191908192b080819,
  3200. 0x191908192b081908, 0x191908192b190808, 0x1919082b08080808, 0x1919082b08081919,
  3201. 0x1919082b08082b08, 0x1919082b08190819, 0x1919082b08191908, 0x1919082b082b0808,
  3202. 0x1919082b19080819, 0x1919082b19081908, 0x1919082b19190808, 0x1919082b192b2b19,
  3203. 0x1919082b2b080808, 0x1919190808080819, 0x1919190808081908, 0x191919080808192b,
  3204. 0x1919190808082b19, 0x1919190808190808, 0x191919080819082b, 0x1919190808191919,
  3205. 0x1919190808192b08, 0x19191908082b0819, 0x19191908082b1908, 0x1919190819080808,
  3206. 0x191919081908082b, 0x1919190819081919, 0x1919190819082b08, 0x1919190819190819,
  3207. 0x1919190819191908, 0x19191908192b0808, 0x191919082b080819, 0x191919082b081908,
  3208. 0x191919082b190808, 0x1919191908080808, 0x191919190808082b, 0x1919191908081919,
  3209. 0x1919191908082b08, 0x1919191908190819, 0x1919191908191908, 0x19191919082b0808,
  3210. 0x1919191919080819, 0x1919191919081908, 0x1919191919190808, 0x191919192b080808,
  3211. 0x1919192b08080819, 0x1919192b08081908, 0x1919192b08190808, 0x1919192b082b192b,
  3212. 0x1919192b19080808, 0x19192b0808080808, 0x19192b080808082b, 0x19192b0808081919,
  3213. 0x19192b0808082b08, 0x19192b0808190819, 0x19192b0808191908, 0x19192b08082b0808,
  3214. 0x19192b0819080819, 0x19192b0819081908, 0x19192b0819190808, 0x19192b0819192b2b,
  3215. 0x19192b082b080808, 0x19192b1908080819, 0x19192b1908081908, 0x19192b1908190808,
  3216. 0x19192b1919080808, 0x19192b2b08080808, 0x19192b2b08192b19, 0x19192b2b2b081919,
  3217. 0x19192b2b2b2b2b08, 0x192b080808080819, 0x192b080808081908, 0x192b08080808192b,
  3218. 0x192b080808190808, 0x192b08080819082b, 0x192b080808191919, 0x192b080808192b08,
  3219. 0x192b0808082b0819, 0x192b0808082b1908, 0x192b080819080808, 0x192b080819081919,
  3220. 0x192b080819082b08, 0x192b080819190819, 0x192b080819191908, 0x192b0808192b0808,
  3221. 0x192b08082b081908, 0x192b08082b190808, 0x192b081908080808, 0x192b08190808082b,
  3222. 0x192b081908081919, 0x192b081908082b08, 0x192b081908190819, 0x192b081908191908,
  3223. 0x192b0819082b0808, 0x192b081919080819, 0x192b081919081908, 0x192b081919190808,
  3224. 0x192b08192b080808, 0x192b08192b192b19, 0x192b082b08081908, 0x192b082b08190808,
  3225. 0x192b082b19080808, 0x192b082b1919192b, 0x192b082b2b2b0819, 0x192b190808080808,
  3226. 0x192b190808081919, 0x192b190808082b08, 0x192b190808190819, 0x192b190808191908,
  3227. 0x192b1908082b0808, 0x192b190819080819, 0x192b190819081908, 0x192b190819190808,
  3228. 0x192b19082b080808, 0x192b191908080819, 0x192b191908081908, 0x192b191908190808,
  3229. 0x192b191919080808, 0x192b191919082b2b, 0x192b1919192b2b08, 0x192b19192b19082b,
  3230. 0x192b192b08080808, 0x192b192b2b191908, 0x192b2b0808080819, 0x192b2b0808081908,
  3231. 0x192b2b0808190808, 0x192b2b08192b1919, 0x192b2b082b192b08, 0x192b2b1908080808,
  3232. 0x192b2b19082b2b2b, 0x192b2b2b1908082b, 0x192b2b2b2b2b0819, 0x2b08080808080808,
  3233. 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819,
  3234. 0x2b08080808191908, 0x2b08080808192b19, 0x2b080808082b0808, 0x2b080808082b1919,
  3235. 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808081919082b,
  3236. 0x2b08080819191919, 0x2b08080819192b08, 0x2b080808192b0819, 0x2b0808082b080808,
  3237. 0x2b0808082b081919, 0x2b0808082b190819, 0x2b0808082b191908, 0x2b08081908080819,
  3238. 0x2b08081908081908, 0x2b08081908082b19, 0x2b08081908190808, 0x2b0808190819082b,
  3239. 0x2b08081908191919, 0x2b08081908192b08, 0x2b080819082b0819, 0x2b080819082b1908,
  3240. 0x2b08081919080808, 0x2b0808191908082b, 0x2b08081919081919, 0x2b08081919082b08,
  3241. 0x2b08081919190819, 0x2b08081919191908, 0x2b0808192b080819, 0x2b0808192b081908,
  3242. 0x2b0808192b190808, 0x2b0808192b2b2b19, 0x2b08082b08080808, 0x2b08082b08081919,
  3243. 0x2b08082b08082b2b, 0x2b08082b08190819, 0x2b08082b08191908, 0x2b08082b19080819,
  3244. 0x2b08082b19081908, 0x2b08082b19190808, 0x2b08190808080819, 0x2b08190808081908,
  3245. 0x2b0819080808192b, 0x2b08190808082b19, 0x2b08190808190808, 0x2b0819080819082b,
  3246. 0x2b08190808191919, 0x2b08190808192b08, 0x2b081908082b0819, 0x2b08190819080808,
  3247. 0x2b0819081908082b, 0x2b08190819081919, 0x2b08190819082b08, 0x2b08190819190819,
  3248. 0x2b08190819191908, 0x2b081908192b0808, 0x2b0819082b080819, 0x2b0819082b081908,
  3249. 0x2b0819082b190808, 0x2b08191908080808, 0x2b0819190808082b, 0x2b08191908081919,
  3250. 0x2b08191908082b08, 0x2b08191908190819, 0x2b08191908191908, 0x2b081919082b0808,
  3251. 0x2b08191919080819, 0x2b08191919081908, 0x2b08191919190808, 0x2b0819192b080808,
  3252. 0x2b0819192b082b2b, 0x2b08192b08080819, 0x2b08192b08081908, 0x2b08192b08190808,
  3253. 0x2b08192b082b2b19, 0x2b08192b19080808, 0x2b082b0808080808, 0x2b082b0808081919,
  3254. 0x2b082b0808190819, 0x2b082b0808191908, 0x2b082b0819080819, 0x2b082b0819081908,
  3255. 0x2b082b0819190808, 0x2b082b082b2b082b, 0x2b082b1908080819, 0x2b082b1908081908,
  3256. 0x2b082b1919080808, 0x2b082b19192b1919, 0x2b082b2b082b082b, 0x2b082b2b19192b08,
  3257. 0x2b082b2b19192b2b, 0x2b082b2b2b08082b, 0x2b082b2b2b2b082b, 0x2b19080808080819,
  3258. 0x2b19080808081908, 0x2b19080808082b19, 0x2b19080808190808, 0x2b1908080819082b,
  3259. 0x2b19080808191919, 0x2b19080808192b08, 0x2b190808082b1908, 0x2b19080819080808,
  3260. 0x2b1908081908082b, 0x2b19080819081919, 0x2b19080819082b08, 0x2b19080819190819,
  3261. 0x2b19080819191908, 0x2b190808192b0808, 0x2b1908082b080819, 0x2b1908082b081908,
  3262. 0x2b1908082b190808, 0x2b19081908080808, 0x2b19081908081919, 0x2b19081908190819,
  3263. 0x2b19081908191908, 0x2b19081919080819, 0x2b19081919081908, 0x2b19081919190808,
  3264. 0x2b19081919192b2b, 0x2b19082b08080819, 0x2b19082b08081908, 0x2b19082b08190808,
  3265. 0x2b19082b19080808, 0x2b19082b2b2b192b, 0x2b19190808080808, 0x2b1919080808082b,
  3266. 0x2b19190808081919, 0x2b19190808082b08, 0x2b19190808190819, 0x2b19190808191908,
  3267. 0x2b191908082b0808, 0x2b19190819080819, 0x2b19190819081908, 0x2b19190819190808,
  3268. 0x2b1919082b080808, 0x2b1919082b19192b, 0x2b19191908080819, 0x2b19191908081908,
  3269. 0x2b19191908190808, 0x2b19191919080808, 0x2b1919192b192b08, 0x2b1919192b2b0819,
  3270. 0x2b19192b08080808, 0x2b19192b1908192b, 0x2b19192b192b1908, 0x2b192b0808080819,
  3271. 0x2b192b0808081908, 0x2b192b0808190808, 0x2b192b08082b192b, 0x2b192b0819080808,
  3272. 0x2b192b082b2b2b19, 0x2b192b1908080808, 0x2b192b1919082b19, 0x2b192b191919082b,
  3273. 0x2b192b2b2b190808, 0x2b2b080808080808, 0x2b2b080808081919, 0x2b2b080808082b2b,
  3274. 0x2b2b080808191908, 0x2b2b0808082b082b, 0x2b2b0808082b2b2b, 0x2b2b080819080819,
  3275. 0x2b2b080819081908, 0x2b2b080819190808, 0x2b2b08082b2b082b, 0x2b2b08082b2b2b2b,
  3276. 0x2b2b081919080808, 0x2b2b0819192b1919, 0x2b2b082b0808082b, 0x2b2b082b08082b2b,
  3277. 0x2b2b082b082b082b, 0x2b2b082b082b2b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b08082b,
  3278. 0x2b2b082b2b082b08, 0x2b2b082b2b082b2b, 0x2b2b082b2b2b2b08, 0x2b2b190808080819,
  3279. 0x2b2b190808081908, 0x2b2b190808190808, 0x2b2b190819080808, 0x2b2b19082b082b19,
  3280. 0x2b2b19082b2b1908, 0x2b2b191908080808, 0x2b2b191908192b19, 0x2b2b192b19190819,
  3281. 0x2b2b2b0808082b2b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b082b, 0x2b2b2b1919191908,
  3282. 0x2b2b2b192b08192b, 0x2b2b2b2b08082b08, 0x2b2b2b2b08082b2b, 0x2b2b2b2b082b0808,
  3283. 0x2b2b2b2b082b082b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b2b082b08, 0x2b2b2b2b2b2b2b2b,
  3284. };
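// Added commentary (not in the original source): the iq2 grid tables above each
// pack eight byte magnitudes per 64-bit entry, with every byte drawn from
// {0x08, 0x19, 0x2b}; dequantization multiplies these magnitudes by a per-group
// scale and applies signs taken from ksigns_iq2xs further below.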
  3285. static const uint32_t iq3xxs_grid[256] = {
  3286. 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
  3287. 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
  3288. 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
  3289. 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
  3290. 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
  3291. 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
  3292. 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
  3293. 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
  3294. 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
  3295. 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
  3296. 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
  3297. 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
  3298. 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
  3299. 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
  3300. 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
  3301. 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
  3302. 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
  3303. 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
  3304. 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
  3305. 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
  3306. 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
  3307. 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
  3308. 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
  3309. 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
  3310. 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
  3311. 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
  3312. 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
  3313. 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
  3314. 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
  3315. 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
  3316. 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
  3317. 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
  3318. };
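// Added commentary (not in the original source): iq3xxs_grid packs four byte
// magnitudes per 32-bit entry and is addressed with one index byte per group of
// 4 values in the iq3_xxs dequantization below.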
  3319. static const uint32_t iq3xs_grid[512] = {
  3320. 0x04040404, 0x0404040c, 0x04040414, 0x0404042c, 0x0404043e, 0x04040c04, 0x04040c0c, 0x04040c14,
  3321. 0x04040c24, 0x04040c34, 0x04041404, 0x0404140c, 0x0404142c, 0x04041c1c, 0x04042404, 0x04042414,
  3322. 0x0404242c, 0x0404243e, 0x04042c0c, 0x04042c1c, 0x04043404, 0x04043414, 0x04043e0c, 0x04043e24,
  3323. 0x04043e3e, 0x040c0404, 0x040c040c, 0x040c0414, 0x040c0424, 0x040c0c04, 0x040c0c0c, 0x040c0c2c,
  3324. 0x040c1404, 0x040c141c, 0x040c143e, 0x040c1c0c, 0x040c1c2c, 0x040c2424, 0x040c340c, 0x040c342c,
  3325. 0x040c3e14, 0x04140404, 0x0414040c, 0x0414042c, 0x0414043e, 0x04140c04, 0x04140c1c, 0x04140c34,
  3326. 0x0414140c, 0x0414142c, 0x04141c04, 0x04141c24, 0x04142414, 0x0414242c, 0x0414243e, 0x04142c0c,
  3327. 0x04142c1c, 0x04143e04, 0x04143e1c, 0x041c041c, 0x041c0c0c, 0x041c0c2c, 0x041c1404, 0x041c1414,
  3328. 0x041c1c0c, 0x041c1c1c, 0x041c1c34, 0x041c2424, 0x041c2c04, 0x041c2c14, 0x041c343e, 0x041c3e0c,
  3329. 0x041c3e2c, 0x04240404, 0x04240c1c, 0x04240c3e, 0x0424140c, 0x04241424, 0x04241c14, 0x04242404,
  3330. 0x0424241c, 0x04242c0c, 0x04243e04, 0x042c0414, 0x042c0424, 0x042c1404, 0x042c1414, 0x042c1434,
  3331. 0x042c1c1c, 0x042c240c, 0x042c242c, 0x042c243e, 0x042c3434, 0x042c3e1c, 0x04340434, 0x04340c0c,
  3332. 0x04340c1c, 0x04341c0c, 0x04342c14, 0x04343e0c, 0x043e0404, 0x043e0414, 0x043e0424, 0x043e1404,
  3333. 0x043e1414, 0x043e1434, 0x043e1c1c, 0x043e2c04, 0x043e2c24, 0x0c040404, 0x0c04040c, 0x0c040414,
  3334. 0x0c040424, 0x0c040c04, 0x0c040c0c, 0x0c040c1c, 0x0c040c2c, 0x0c040c3e, 0x0c041404, 0x0c041414,
  3335. 0x0c041c0c, 0x0c041c24, 0x0c041c34, 0x0c042c24, 0x0c042c34, 0x0c04340c, 0x0c043e14, 0x0c0c0404,
  3336. 0x0c0c040c, 0x0c0c041c, 0x0c0c0434, 0x0c0c0c04, 0x0c0c0c24, 0x0c0c140c, 0x0c0c1c04, 0x0c0c1c1c,
  3337. 0x0c0c240c, 0x0c0c2c04, 0x0c0c2c14, 0x0c0c3e04, 0x0c0c3e34, 0x0c140404, 0x0c140c14, 0x0c140c2c,
  3338. 0x0c140c3e, 0x0c141404, 0x0c141424, 0x0c141c14, 0x0c142404, 0x0c14241c, 0x0c142c2c, 0x0c143404,
  3339. 0x0c143e14, 0x0c1c040c, 0x0c1c0424, 0x0c1c043e, 0x0c1c0c04, 0x0c1c0c1c, 0x0c1c140c, 0x0c1c143e,
  3340. 0x0c1c1c04, 0x0c1c1c24, 0x0c1c240c, 0x0c1c3414, 0x0c1c3e04, 0x0c24041c, 0x0c24042c, 0x0c240c14,
  3341. 0x0c240c24, 0x0c241c0c, 0x0c241c1c, 0x0c242414, 0x0c242434, 0x0c242c04, 0x0c242c24, 0x0c2c040c,
  3342. 0x0c2c0c04, 0x0c2c0c1c, 0x0c2c140c, 0x0c2c1c04, 0x0c2c1c14, 0x0c2c2c0c, 0x0c341404, 0x0c341424,
  3343. 0x0c34143e, 0x0c342424, 0x0c342434, 0x0c3e040c, 0x0c3e041c, 0x0c3e0c04, 0x0c3e0c14, 0x0c3e140c,
  3344. 0x0c3e1c2c, 0x0c3e240c, 0x0c3e3414, 0x0c3e3e04, 0x14040404, 0x1404040c, 0x1404041c, 0x1404042c,
  3345. 0x1404043e, 0x14040c04, 0x14040c14, 0x14040c24, 0x14040c34, 0x1404140c, 0x1404141c, 0x1404143e,
  3346. 0x14041c04, 0x14041c14, 0x1404240c, 0x1404241c, 0x1404242c, 0x14042c04, 0x14042c14, 0x1404343e,
  3347. 0x14043e04, 0x14043e1c, 0x14043e2c, 0x140c0404, 0x140c0414, 0x140c0c04, 0x140c0c1c, 0x140c0c3e,
  3348. 0x140c1414, 0x140c142c, 0x140c1c0c, 0x140c1c24, 0x140c2414, 0x140c2c0c, 0x1414040c, 0x14140424,
  3349. 0x1414043e, 0x1414140c, 0x1414141c, 0x14141c04, 0x14141c3e, 0x1414240c, 0x14142c1c, 0x14142c3e,
  3350. 0x14143e0c, 0x14143e24, 0x141c0404, 0x141c0414, 0x141c042c, 0x141c0c0c, 0x141c1414, 0x141c1424,
  3351. 0x141c1c0c, 0x141c1c1c, 0x141c2414, 0x141c2c04, 0x141c3434, 0x1424040c, 0x1424043e, 0x14241404,
  3352. 0x1424141c, 0x14241c14, 0x14241c2c, 0x1424240c, 0x14243e14, 0x14243e2c, 0x142c0424, 0x142c0c0c,
  3353. 0x142c1414, 0x142c1c3e, 0x142c2404, 0x142c2c1c, 0x142c3e04, 0x14340404, 0x14340414, 0x1434043e,
  3354. 0x1434140c, 0x14342c2c, 0x1434340c, 0x143e042c, 0x143e0c0c, 0x143e1434, 0x143e1c04, 0x143e241c,
  3355. 0x143e2c04, 0x1c040414, 0x1c040c0c, 0x1c040c1c, 0x1c040c2c, 0x1c040c3e, 0x1c041414, 0x1c041c0c,
  3356. 0x1c041c1c, 0x1c041c2c, 0x1c042414, 0x1c042424, 0x1c04243e, 0x1c042c0c, 0x1c04341c, 0x1c043e0c,
  3357. 0x1c0c040c, 0x1c0c041c, 0x1c0c042c, 0x1c0c0c24, 0x1c0c140c, 0x1c0c141c, 0x1c0c2404, 0x1c0c3404,
  3358. 0x1c0c3e14, 0x1c0c3e34, 0x1c140404, 0x1c140c14, 0x1c141404, 0x1c141c14, 0x1c141c24, 0x1c142c04,
  3359. 0x1c1c040c, 0x1c1c0c04, 0x1c1c0c24, 0x1c1c140c, 0x1c1c141c, 0x1c1c143e, 0x1c1c1c04, 0x1c1c240c,
  3360. 0x1c1c241c, 0x1c1c243e, 0x1c1c2c2c, 0x1c1c3e1c, 0x1c24041c, 0x1c240c0c, 0x1c240c34, 0x1c241414,
  3361. 0x1c241c0c, 0x1c242c14, 0x1c243404, 0x1c243424, 0x1c2c040c, 0x1c2c0c04, 0x1c2c0c14, 0x1c2c142c,
  3362. 0x1c2c1c14, 0x1c2c2424, 0x1c2c2c34, 0x1c2c3e1c, 0x1c340c34, 0x1c34240c, 0x1c3e040c, 0x1c3e041c,
  3363. 0x1c3e1404, 0x1c3e1414, 0x1c3e1c2c, 0x24040404, 0x24040424, 0x24040c14, 0x24041404, 0x24041424,
  3364. 0x2404143e, 0x24041c14, 0x2404240c, 0x24042c04, 0x24043e04, 0x240c0414, 0x240c043e, 0x240c0c0c,
  3365. 0x240c0c1c, 0x240c1414, 0x240c1c04, 0x240c1c2c, 0x240c241c, 0x240c2c0c, 0x240c2c2c, 0x2414040c,
  3366. 0x2414041c, 0x24140c04, 0x24140c2c, 0x2414140c, 0x24141c1c, 0x24142404, 0x24142c3e, 0x24143414,
  3367. 0x24143e04, 0x241c0424, 0x241c0c0c, 0x241c0c1c, 0x241c1404, 0x241c1414, 0x241c1c0c, 0x241c1c2c,
  3368. 0x24240404, 0x24240414, 0x24241424, 0x24241c3e, 0x24242404, 0x24243e0c, 0x242c042c, 0x242c043e,
  3369. 0x242c140c, 0x242c3414, 0x24340c1c, 0x24341c24, 0x24343404, 0x243e0c04, 0x243e0c2c, 0x243e1c04,
  3370. 0x243e241c, 0x243e2c0c, 0x2c040414, 0x2c040c04, 0x2c040c24, 0x2c041414, 0x2c042404, 0x2c042424,
  3371. 0x2c04243e, 0x2c042c14, 0x2c043434, 0x2c043e24, 0x2c0c040c, 0x2c0c041c, 0x2c0c042c, 0x2c0c0c14,
  3372. 0x2c0c140c, 0x2c0c1c14, 0x2c0c3e14, 0x2c140404, 0x2c140c0c, 0x2c14141c, 0x2c141c04, 0x2c141c34,
  3373. 0x2c142c1c, 0x2c1c0414, 0x2c1c043e, 0x2c1c0c04, 0x2c1c143e, 0x2c1c2424, 0x2c1c2c0c, 0x2c1c342c,
  3374. 0x2c1c3e1c, 0x2c24040c, 0x2c240424, 0x2c241404, 0x2c241c14, 0x2c242434, 0x2c2c0c14, 0x2c2c1434,
  3375. 0x2c2c2c0c, 0x2c2c2c1c, 0x2c342414, 0x2c3e0414, 0x2c3e0424, 0x2c3e1414, 0x34040c0c, 0x34040c1c,
  3376. 0x34040c2c, 0x34041c0c, 0x34041c1c, 0x34043404, 0x340c0404, 0x340c1404, 0x340c143e, 0x340c3424,
  3377. 0x34140c14, 0x34141c24, 0x34142414, 0x34142c2c, 0x34143414, 0x34143e04, 0x341c0404, 0x341c0c24,
  3378. 0x341c140c, 0x341c2404, 0x3424142c, 0x3424241c, 0x34243414, 0x342c0404, 0x342c041c, 0x342c1c24,
  3379. 0x342c3404, 0x3434042c, 0x34342404, 0x343e0c0c, 0x343e0c1c, 0x3e040404, 0x3e040424, 0x3e04043e,
  3380. 0x3e041404, 0x3e041414, 0x3e041c34, 0x3e042404, 0x3e042c24, 0x3e043414, 0x3e0c0414, 0x3e0c0c0c,
  3381. 0x3e0c1424, 0x3e0c241c, 0x3e0c242c, 0x3e14040c, 0x3e140424, 0x3e140c04, 0x3e140c34, 0x3e14140c,
  3382. 0x3e141c04, 0x3e142c0c, 0x3e1c0414, 0x3e1c1c14, 0x3e1c1c2c, 0x3e1c2c1c, 0x3e24040c, 0x3e24042c,
  3383. 0x3e240c1c, 0x3e241404, 0x3e242c04, 0x3e2c1414, 0x3e2c2414, 0x3e340414, 0x3e341c0c, 0x3e3e0404,
  3384. };
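// Added commentary (not in the original source): iq3xs_grid is the 512-entry
// counterpart used by iq3_s, where the 9th index bit is pulled from the block's
// qh bytes (see dequantize_row_iq3_s below).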
  3385. #define NGRID_IQ2XXS 512
  3386. static const uint64_t iq1s_grid[NGRID_IQ2XXS] = {
  3387. 0xffffffffffff0101, 0xffffffffff01ff00, 0xffffffffff010100, 0xffffffff00000000,
  3388. 0xffffffff01ff00ff, 0xffffffff01ff0001, 0xffffffff0101ffff, 0xffffffff0101ff01,
  3389. 0xffffff00ff000000, 0xffffff000000ff00, 0xffffff00000000ff, 0xffffff0000000100,
  3390. 0xffffff0000010000, 0xffffff0001000000, 0xffffff01ffff00ff, 0xffffff01ff01ff00,
  3391. 0xffffff01ff010100, 0xffffff0100000001, 0xffffff0101ffff00, 0xffffff0101ff0101,
  3392. 0xffffff0101010100, 0xffff00ffff00ff01, 0xffff00ffff0000ff, 0xffff00ff00ff0100,
  3393. 0xffff00ff0100ff00, 0xffff00ff010001ff, 0xffff0000ff0101ff, 0xffff000000ffff00,
  3394. 0xffff000000000000, 0xffff00000001ff01, 0xffff000001000101, 0xffff0000010100ff,
  3395. 0xffff0001ffff0100, 0xffff00010000ff00, 0xffff000100010101, 0xffff000101000000,
  3396. 0xffff01ffffff0000, 0xffff01ffff01ffff, 0xffff01ffff010100, 0xffff01ff00000000,
  3397. 0xffff01ff01ffffff, 0xffff01ff01ff0001, 0xffff01ff0101ffff, 0xffff01ff01010001,
  3398. 0xffff0100ffffff01, 0xffff01000000ffff, 0xffff010000000100, 0xffff010001ff01ff,
  3399. 0xffff010001000000, 0xffff0101ff000000, 0xffff0101000101ff, 0xffff010101ffff01,
  3400. 0xffff01010101ff00, 0xff00ffffff000000, 0xff00ffff00ffff00, 0xff00ffff00000001,
  3401. 0xff00ffff000001ff, 0xff00ffff01010000, 0xff00ff00ffff0000, 0xff00ff00ff00ff00,
  3402. 0xff00ff00ff0000ff, 0xff00ff00ff000100, 0xff00ff00ff010001, 0xff00ff0000ff0001,
  3403. 0xff00ff000000ffff, 0xff00ff0000000000, 0xff00ff000001ff00, 0xff00ff0000010100,
  3404. 0xff00ff0001ff0000, 0xff00ff000100ff00, 0xff00ff0001000100, 0xff00ff01ff000000,
  3405. 0xff00ff0100ff0000, 0xff00ff01000001ff, 0xff00ff0101010001, 0xff0000ff00000000,
  3406. 0xff0000ff0001ff00, 0xff0000ff00010100, 0xff000000ffff0101, 0xff000000ff000000,
  3407. 0xff000000ff01ff00, 0xff00000000ff0000, 0xff0000000000ff00, 0xff000000000000ff,
  3408. 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, 0xff0000000001ffff,
  3409. 0xff00000000010000, 0xff00000001000000, 0xff00000001010100, 0xff000001ff00ff01,
  3410. 0xff000001ff0100ff, 0xff00000100000000, 0xff0000010001ff00, 0xff00000101ff0100,
  3411. 0xff0000010100ff00, 0xff0001ff00ff00ff, 0xff0001ff00000101, 0xff0001ff000100ff,
  3412. 0xff0001ff01000000, 0xff000100ff0001ff, 0xff0001000000ff01, 0xff00010000000000,
  3413. 0xff00010000010001, 0xff00010000010100, 0xff00010001ffff00, 0xff00010001ff0101,
  3414. 0xff00010001010000, 0xff000101ffffffff, 0xff000101ff000101, 0xff00010101ff00ff,
  3415. 0xff00010101000001, 0xff000101010100ff, 0xff01ffffff000101, 0xff01ffffff01ffff,
  3416. 0xff01ffffff01ff01, 0xff01ffffff0101ff, 0xff01ffff00000000, 0xff01ffff01ff0001,
  3417. 0xff01ffff0101ff01, 0xff01ff00ff000000, 0xff01ff0000ff0100, 0xff01ff000000ff01,
  3418. 0xff01ff0000010000, 0xff01ff00010000ff, 0xff01ff01ff01ff00, 0xff01ff0100000101,
  3419. 0xff0100ffffff0000, 0xff0100ffff010000, 0xff0100ff01ff00ff, 0xff0100ff01000100,
  3420. 0xff0100ff010100ff, 0xff010000ffffff01, 0xff01000000000000, 0xff0100000101ff00,
  3421. 0xff010001ffff00ff, 0xff010001ff000100, 0xff01000100ffff00, 0xff01000100010001,
  3422. 0xff01000101ff0001, 0xff010001010001ff, 0xff0101ffffffffff, 0xff0101ffff01ffff,
  3423. 0xff0101ffff010101, 0xff0101ff0000ff00, 0xff0101ff01010001, 0xff010100ff000000,
  3424. 0xff010100ff01ff01, 0xff01010000ff0001, 0xff01010000000100, 0xff01010001000000,
  3425. 0xff0101010100ffff, 0x00ffffff0000ff01, 0x00ffffff000000ff, 0x00ffffff00000100,
  3426. 0x00ffffff00010000, 0x00ffff00ffff0001, 0x00ffff00ff0000ff, 0x00ffff00ff000100,
  3427. 0x00ffff0000000000, 0x00ffff0001000100, 0x00ffff0001010001, 0x00ffff01ff00ff01,
  3428. 0x00ffff0100ff0100, 0x00ffff010000ff00, 0x00ffff01000100ff, 0x00ffff0101ff00ff,
  3429. 0x00ffff010101ff00, 0x00ff00ffffffffff, 0x00ff00ffffff01ff, 0x00ff00ffff000101,
  3430. 0x00ff00ff00000000, 0x00ff00ff000101ff, 0x00ff00ff01010101, 0x00ff0000ff000000,
  3431. 0x00ff0000ff01ffff, 0x00ff000000ff0000, 0x00ff00000000ff00, 0x00ff0000000000ff,
  3432. 0x00ff000000000000, 0x00ff000000000001, 0x00ff000000000100, 0x00ff000000010000,
  3433. 0x00ff000001ffff01, 0x00ff000001000000, 0x00ff0001ff000101, 0x00ff000100ffffff,
  3434. 0x00ff000100000000, 0x00ff0001010001ff, 0x00ff01ffff000000, 0x00ff01ff0001ff00,
  3435. 0x00ff01ff01ff0100, 0x00ff0100ff01ff01, 0x00ff010000ff00ff, 0x00ff010000ff0101,
  3436. 0x00ff010000000000, 0x00ff010000010101, 0x00ff01000100ff00, 0x00ff010001010000,
  3437. 0x00ff0101ffffff00, 0x00ff01010000ff01, 0x00ff010100000100, 0x00ff010101ff0000,
  3438. 0x0000ffffffff0100, 0x0000ffffff00ff00, 0x0000ffffff0000ff, 0x0000ffffff010000,
  3439. 0x0000ffff00000000, 0x0000ffff00010101, 0x0000ffff01ffff01, 0x0000ffff01000100,
  3440. 0x0000ff00ff000000, 0x0000ff00ff01ff00, 0x0000ff00ff0101ff, 0x0000ff0000ff0000,
  3441. 0x0000ff000000ff00, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001,
  3442. 0x0000ff0000000100, 0x0000ff0000010000, 0x0000ff0001ffffff, 0x0000ff0001ff01ff,
  3443. 0x0000ff0001000000, 0x0000ff000101ffff, 0x0000ff01ffff0101, 0x0000ff01ff010000,
  3444. 0x0000ff0100000000, 0x0000ff0101000101, 0x000000ffffff0001, 0x000000ffff000000,
  3445. 0x000000ff00ff0000, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000,
  3446. 0x000000ff00000001, 0x000000ff00000100, 0x000000ff00010000, 0x000000ff01000000,
  3447. 0x000000ff0101ff00, 0x00000000ffff0000, 0x00000000ff00ff00, 0x00000000ff0000ff,
  3448. 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff010000,
  3449. 0x0000000000ffff00, 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001,
  3450. 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01,
  3451. 0x00000000000000ff, 0x0000000000000001, 0x00000000000001ff, 0x0000000000000100,
  3452. 0x0000000000000101, 0x000000000001ff00, 0x00000000000100ff, 0x0000000000010000,
  3453. 0x0000000000010001, 0x0000000000010100, 0x0000000001ff0000, 0x000000000100ff00,
  3454. 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, 0x0000000001000100,
  3455. 0x0000000001010000, 0x00000001ffff01ff, 0x00000001ff000000, 0x0000000100ff0000,
  3456. 0x000000010000ff00, 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001,
  3457. 0x0000000100000100, 0x0000000100010000, 0x0000000101000000, 0x000001ffff00ff00,
  3458. 0x000001ffff010001, 0x000001ffff0101ff, 0x000001ff00ffff01, 0x000001ff0000ffff,
  3459. 0x000001ff00000000, 0x000001ff010000ff, 0x000001ff01010100, 0x00000100ffff0100,
  3460. 0x00000100ff000000, 0x0000010000ff0000, 0x000001000000ff00, 0x00000100000000ff,
  3461. 0x0000010000000000, 0x0000010000000001, 0x0000010000000100, 0x0000010000010000,
  3462. 0x0000010001000000, 0x000001000101ff01, 0x00000101ffff0001, 0x00000101ff01ffff,
  3463. 0x0000010100000000, 0x0000010101010100, 0x0001ffffff000000, 0x0001ffff00ffffff,
  3464. 0x0001ffff00000100, 0x0001ffff0001ff00, 0x0001ffff01000000, 0x0001ff00ffffff00,
  3465. 0x0001ff00ffff01ff, 0x0001ff00ff010000, 0x0001ff0000000000, 0x0001ff0000010001,
  3466. 0x0001ff0001ff0000, 0x0001ff0001010100, 0x0001ff01ff0000ff, 0x0001ff01ff000001,
  3467. 0x0001ff0100ffffff, 0x0001ff010001ffff, 0x0001ff01000101ff, 0x0001ff010100ff01,
  3468. 0x000100ffff00ffff, 0x000100ffff00ff01, 0x000100ffff000100, 0x000100ff00000000,
  3469. 0x000100ff000101ff, 0x000100ff01ff0101, 0x000100ff0100ffff, 0x000100ff01010101,
  3470. 0x00010000ff000000, 0x00010000ff010100, 0x0001000000ff0000, 0x000100000000ff00,
  3471. 0x00010000000000ff, 0x0001000000000000, 0x0001000000000001, 0x0001000000000100,
  3472. 0x0001000000010000, 0x0001000001ffff01, 0x0001000001000000, 0x0001000100ff0101,
  3473. 0x0001000100000000, 0x00010001010100ff, 0x000101ffffff01ff, 0x000101ffffff0101,
  3474. 0x000101ff00010000, 0x000101ff01ff0000, 0x000101ff0100ff01, 0x00010100ffff0000,
  3475. 0x0001010000000000, 0x000101000001ffff, 0x0001010000010101, 0x00010100010001ff,
  3476. 0x00010101ff00ff00, 0x00010101ff010001, 0x0001010100ffffff, 0x0001010100ff01ff,
  3477. 0x00010101000101ff, 0x0001010101ff0000, 0x000101010100ff01, 0x0001010101000101,
  3478. 0x01ffffffffff0101, 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff,
  3479. 0x01ffffffff010101, 0x01ffffff00000000, 0x01ffffff01ff01ff, 0x01ffffff01000101,
  3480. 0x01ffffff0101ff01, 0x01ffffff010100ff, 0x01ffff000000ff00, 0x01ffff0000000001,
  3481. 0x01ffff00000001ff, 0x01ffff0000010000, 0x01ffff0001ff0000, 0x01ffff01ffffffff,
  3482. 0x01ffff01ffff01ff, 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff0101ff,
  3483. 0x01ffff010100ffff, 0x01ff00ffffff0000, 0x01ff00ffff010000, 0x01ff00ff00ffff01,
  3484. 0x01ff0000ff0000ff, 0x01ff000000000000, 0x01ff00000001ff01, 0x01ff000001ffffff,
  3485. 0x01ff000001010100, 0x01ff0001ffffff01, 0x01ff0001ff010001, 0x01ff000101ff0100,
  3486. 0x01ff000101000001, 0x01ff0001010100ff, 0x01ff01ffff00ffff, 0x01ff01ff00010001,
  3487. 0x01ff01ff01000000, 0x01ff01ff010101ff, 0x01ff0100ff000001, 0x01ff010000ffff00,
  3488. 0x01ff010000000100, 0x01ff010001ff01ff, 0x01ff01000101ffff, 0x01ff0101ffff00ff,
  3489. 0x01ff0101ffff0101, 0x01ff0101ff0101ff, 0x01ff010100010000, 0x0100ffff00ff00ff,
  3490. 0x0100ffff00ff0001, 0x0100ffff00000100, 0x0100ffff0100ff00, 0x0100ff00ffff0000,
  3491. 0x0100ff00ff00ffff, 0x0100ff00ff00ff01, 0x0100ff00ff000100, 0x0100ff00ff010000,
  3492. 0x0100ff0000000000, 0x0100ff00000100ff, 0x0100ff0001ff0101, 0x0100ff0001010101,
  3493. 0x0100ff0100ff00ff, 0x0100ff0100ff0001, 0x0100ff0100000100, 0x0100ff0100010001,
  3494. 0x0100ff0101000000, 0x010000ffff00ff00, 0x010000ff0000ffff, 0x010000ff00000000,
  3495. 0x010000ff010001ff, 0x010000ff01010001, 0x01000000ffffff00, 0x01000000ffff0101,
  3496. 0x01000000ff000000, 0x01000000ff0100ff, 0x01000000ff010101, 0x0100000000ff0000,
  3497. 0x010000000000ff00, 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001,
  3498. 0x0100000000000100, 0x0100000000010000, 0x0100000001000000, 0x0100000100000000,
  3499. 0x01000001000101ff, 0x0100000101ffff01, 0x010001ffff000101, 0x010001ff00ff0100,
  3500. 0x010001ff0000ff00, 0x010001ff000100ff, 0x010001ff01ffffff, 0x01000100ffff0000,
  3501. 0x01000100ff0001ff, 0x0100010000000000, 0x010001000001ff00, 0x0100010001ff0000,
  3502. 0x01000100010000ff, 0x0100010001000101, 0x01000101ff00ff01, 0x0100010100ff0100,
  3503. 0x010001010000ffff, 0x0100010101010001, 0x0101ffffffff0101, 0x0101ffffff0001ff,
  3504. 0x0101ffffff01ffff, 0x0101ffffff010101, 0x0101ffff00000000, 0x0101ffff0101ffff,
  3505. 0x0101ffff010101ff, 0x0101ff00ff000000, 0x0101ff0000ff0100, 0x0101ff000000ff00,
  3506. 0x0101ff0000010000, 0x0101ff00010000ff, 0x0101ff0001000001, 0x0101ff01ff010101,
  3507. 0x0101ff0100000000, 0x0101ff010101ff00, 0x010100ffffff0000, 0x010100ffff010000,
  3508. 0x010100ff00ff01ff, 0x010100ff000000ff, 0x010100ff00000101, 0x010100ff01ffff00,
  3509. 0x01010000ffffff01, 0x01010000ff000100, 0x01010000ff01ff01, 0x0101000000000000,
  3510. 0x01010000000100ff, 0x010100000101ff01, 0x01010001ffff0000, 0x01010001ff00ffff,
  3511. 0x01010001ff010000, 0x0101000101ffffff, 0x0101000101ff01ff, 0x0101000101010101,
  3512. 0x010101ffff01ffff, 0x010101ff00000000, 0x010101ff0001ff01, 0x010101ff0101ffff,
  3513. 0x010101ff010101ff, 0x01010100ffffffff, 0x01010100ff000001, 0x010101000000ff00,
  3514. 0x0101010001010000, 0x0101010100ff0001, 0x010101010001ff01, 0x010101010101ffff,
  3515. };
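// Added commentary (not in the original source): each 64-bit entry of iq1s_grid
// above packs eight signed byte values drawn from {-1 (0xff), 0 (0x00), +1 (0x01)},
// i.e. one ternary codebook point per group of 8 weights for the 1.5625 bpw
// iq1_s format.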
static const uint8_t ksigns_iq2xs[128] = {
      0, 129, 130,   3, 132,   5,   6, 135, 136,   9,  10, 139,  12, 141, 142,  15,
    144,  17,  18, 147,  20, 149, 150,  23,  24, 153, 154,  27, 156,  29,  30, 159,
    160,  33,  34, 163,  36, 165, 166,  39,  40, 169, 170,  43, 172,  45,  46, 175,
     48, 177, 178,  51, 180,  53,  54, 183, 184,  57,  58, 187,  60, 189, 190,  63,
    192,  65,  66, 195,  68, 197, 198,  71,  72, 201, 202,  75, 204,  77,  78, 207,
     80, 209, 210,  83, 212,  85,  86, 215, 216,  89,  90, 219,  92, 221, 222,  95,
     96, 225, 226,  99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111,
    240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
};
static const uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128};
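// Added commentary (not in the original source): each entry of ksigns_iq2xs
// holds 7 explicit sign bits in bits 0..6 plus a parity bit in bit 7, chosen so
// that the total number of set bits is even; the 8th sign of a group of 8
// values is therefore implied by the first 7. kmask_iq2xs selects the j-th
// sign bit when expanding a group.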
void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
            const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
                const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
                for (int j = 0; j < 8; ++j) {
                    y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
                }
                y += 8;
            }
        }
    }
}
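// Added commentary (not in the original source): for every group of 32 values
// the routine above reads two 32-bit words; the low word supplies four 8-bit
// indices into iq2xxs_grid and the high word supplies 4x7 sign bits plus a
// 4-bit scale. A minimal usage sketch follows; dequantize_iq2_xxs_matrix is a
// hypothetical helper (not part of ggml) assuming row-major data with
// n_per_row a multiple of QK_K.
static void dequantize_iq2_xxs_matrix(const block_iq2_xxs * restrict x, float * restrict y,
                                      int nrow, int n_per_row) {
    const int nblock = n_per_row / QK_K;  // quantized blocks per row
    for (int row = 0; row < nrow; ++row) {
        // each row starts on a whole-block boundary in the quantized buffer
        dequantize_row_iq2_xxs(x + row*nblock, y + (int64_t)row*n_per_row, n_per_row);
    }
}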
// ====================== 2.3125 bpw (de)-quantization

void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    float db[2];

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
            db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
                const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
                }
                y += 8;
            }
        }
    }
}
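// Added commentary (not in the original source): in iq2_xs each group of 32
// values has one scale byte in x[i].scales holding two 4-bit scales (one per
// 16 values), and each 16-bit entry of x[i].qs packs a 9-bit index into
// iq2xs_grid in the low bits plus a 7-bit sign pattern in the high bits, hence
// the `& 511` / `>> 9` split above.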
// ====================== 2.5625 bpw (de)-quantization

void dequantize_row_iq2_s(const block_iq2_s * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    float db[2];

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint8_t * qh = x[i].qh;
        const uint8_t * signs = qs + QK_K/8;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
            db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
            for (int l = 0; l < 4; ++l) {
                const float dl = db[l/2];
                const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
                for (int j = 0; j < 8; ++j) {
                    y[j] = dl * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1.f : 1.f);
                }
                y += 8;
            }
            qs += 4;
            signs += 4;
        }
    }
}
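// Added commentary (not in the original source): iq2_s stores the grid index
// of each group of 8 values as an 8-bit low part in qs plus 2 high bits taken
// from qh (the `& 0x300` term), giving a 10-bit index into iq2s_grid; the sign
// bytes follow the index bytes at qs + QK_K/8, and the scales are packed two
// 4-bit values per byte as in iq2_xs.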
// ====================== 3.0625 bpw (de)-quantization

void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint32_t aux32;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint8_t * scales_and_signs = qs + QK_K/4;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
            const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
                for (int j = 0; j < 4; ++j) {
                    y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
                    y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
                }
                y += 8;
            }
            qs += 8;
        }
    }
}
  3621. // ====================== 3.3125 bpw (de)-quantization
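// dequantize_row_iq3_s: like iq3_xxs but the grid index grows to 9 bits (8 from qs plus 1
// from qh) and the sign bytes are stored explicitly; each 4-bit scale nibble covers 32 values
// (two nibbles packed per scales byte) and is applied as d * (0.5f + s) * 0.5f.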
  3622. void dequantize_row_iq3_s(const block_iq3_s * restrict x, float * restrict y, int k) {
  3623. assert(k % QK_K == 0);
  3624. const int nb = k / QK_K;
  3625. for (int i = 0; i < nb; i++) {
  3626. const float d = GGML_FP16_TO_FP32(x[i].d);
  3627. const uint8_t * qs = x[i].qs;
  3628. const uint8_t * qh = x[i].qh;
  3629. const uint8_t * signs = x[i].signs;
  3630. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  3631. const float db1 = d * (0.5f + (x[i].scales[ib32/2] & 0xf)) * 0.5f;
  3632. const float db2 = d * (0.5f + (x[i].scales[ib32/2] >> 4)) * 0.5f;
  3633. for (int l = 0; l < 4; ++l) {
  3634. const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[0] << (8-2*l)) & 256)));
  3635. const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[0] << (7-2*l)) & 256)));
  3636. for (int j = 0; j < 4; ++j) {
  3637. y[j+0] = db1 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
  3638. y[j+4] = db1 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
  3639. }
  3640. y += 8;
  3641. }
  3642. qs += 8;
  3643. signs += 4;
  3644. for (int l = 0; l < 4; ++l) {
  3645. const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[1] << (8-2*l)) & 256)));
  3646. const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[1] << (7-2*l)) & 256)));
  3647. for (int j = 0; j < 4; ++j) {
  3648. y[j+0] = db2 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
  3649. y[j+4] = db2 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
  3650. }
  3651. y += 8;
  3652. }
  3653. qh += 2;
  3654. qs += 8;
  3655. signs += 4;
  3656. }
  3657. }
  3658. }
  3659. // ====================== 1.5625 bpw (de)-quantization
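// dequantize_row_iq1_s: every group of 8 values is an 8-bit index into iq1s_grid, extended to
// 9 bits by one bit borrowed from the scale bytes. The remaining 3-bit scale maps to an odd
// multiplier, db = d * (2*s + 1), so e.g. s = 3 gives db = 7*d.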
  3660. void dequantize_row_iq1_s(const block_iq1_s * restrict x, float * restrict y, int k) {
  3661. assert(k % QK_K == 0);
  3662. const int nb = k / QK_K;
  3663. float db[4];
  3664. uint16_t idx[4];
  3665. //const int8_t * grid[4];
  3666. for (int i = 0; i < nb; i++) {
  3667. const float d = GGML_FP16_TO_FP32(x[i].d);
  3668. const uint8_t * sc = x[i].scales;
  3669. const uint8_t * qs = x[i].qs;
  3670. for (int i8 = 0; i8 < QK_K/8; i8 += 4) {
  3671. idx[0] = qs[0] | ((sc[0] & 0x08) << 5);
  3672. idx[1] = qs[1] | ((sc[0] & 0x80) << 1);
  3673. idx[2] = qs[2] | ((sc[1] & 0x08) << 5);
  3674. idx[3] = qs[3] | ((sc[1] & 0x80) << 1);
  3675. //grid[0] = (const int8_t *)(iq1s_grid + (qs[0] | ((sc[0] & 0x08) << 5)));
  3676. //grid[1] = (const int8_t *)(iq1s_grid + (qs[1] | ((sc[0] & 0x80) << 1)));
  3677. //grid[2] = (const int8_t *)(iq1s_grid + (qs[2] | ((sc[1] & 0x08) << 5)));
  3678. //grid[3] = (const int8_t *)(iq1s_grid + (qs[3] | ((sc[1] & 0x80) << 1)));
  3679. db[0] = d * (2*(sc[0] & 7) + 1);
  3680. db[1] = d * (2*((sc[0] >> 4) & 7) + 1);
  3681. db[2] = d * (2*(sc[1] & 7) + 1);
  3682. db[3] = d * (2*((sc[1] >> 4) & 7) + 1);
  3683. for (int l = 0; l < 4; ++l) {
  3684. const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
  3685. for (int j = 0; j < 8; ++j) {
  3686. //y[j] = db[l] * grid[l][j];
  3687. y[j] = db[l] * grid[j];
  3688. }
  3689. y += 8;
  3690. }
  3691. qs += 4;
  3692. sc += 2;
  3693. }
  3694. }
  3695. }
  3696. static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
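// iq4_nl (non-linear 4-bit): each nibble is an index into the kvalues_iq4nl table above
// rather than a linear level, so y = d * kvalues_iq4nl[nibble]. iq4_xs below reuses the same
// codebook but adds a 6-bit per-32-value scale (assembled from scales_l/scales_h, offset by 32).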
  3697. void dequantize_row_iq4_nl(const block_iq4_nl * restrict x, float * restrict y, int k) {
  3698. assert(k % QK4_NL == 0);
  3699. const int nb = k / QK4_NL;
  3700. for (int i = 0; i < nb; i++) {
  3701. const uint8_t * qs = x[i].qs;
  3702. const float d = GGML_FP16_TO_FP32(x[i].d);
  3703. for (int j = 0; j < QK4_NL/2; ++j) {
  3704. y[j+ 0] = d * kvalues_iq4nl[qs[j] & 0xf];
  3705. y[j+QK4_NL/2] = d * kvalues_iq4nl[qs[j] >> 4];
  3706. }
  3707. y += QK4_NL;
  3708. qs += QK4_NL/2;
  3709. }
  3710. }
  3711. void dequantize_row_iq4_xs(const block_iq4_xs * restrict x, float * restrict y, int k) {
  3712. assert(k % QK_K == 0);
  3713. #if QK_K == 64
  3714. dequantize_row_iq4_nl((const block_iq4_nl *)x, y, k);
  3715. #else
  3716. const int nb = k / QK_K;
  3717. for (int i = 0; i < nb; i++) {
  3718. const uint8_t * qs = x[i].qs;
  3719. const float d = GGML_FP16_TO_FP32(x[i].d);
  3720. for (int ib = 0; ib < QK_K/32; ++ib) {
  3721. const int ls = ((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4);
  3722. const float dl = d * (ls - 32);
  3723. for (int j = 0; j < 16; ++j) {
  3724. y[j+ 0] = dl * kvalues_iq4nl[qs[j] & 0xf];
  3725. y[j+16] = dl * kvalues_iq4nl[qs[j] >> 4];
  3726. }
  3727. y += 32;
  3728. qs += 16;
  3729. }
  3730. }
  3731. #endif
  3732. }
  3733. //===================================== Q8_K ==============================================
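// Q8_K is the 8-bit format used on the activation side of the K-quant and IQ dot products:
// one float scale per QK_K values plus per-16-value sums (bsums) that some of the dot
// products further down reuse instead of re-summing the quants.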
  3734. void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
  3735. assert(k % QK_K == 0);
  3736. const int nb = k / QK_K;
  3737. for (int i = 0; i < nb; i++) {
  3738. float max = 0;
  3739. float amax = 0;
  3740. for (int j = 0; j < QK_K; ++j) {
  3741. float ax = fabsf(x[j]);
  3742. if (ax > amax) {
  3743. amax = ax; max = x[j];
  3744. }
  3745. }
  3746. if (!amax) {
  3747. y[i].d = 0;
  3748. memset(y[i].qs, 0, QK_K);
  3749. x += QK_K;
  3750. continue;
  3751. }
  3752. //const float iscale = -128.f/max;
  3753. // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward
  3754. const float iscale = -127.f/max;
  3755. for (int j = 0; j < QK_K; ++j) {
  3756. int v = nearest_int(iscale*x[j]);
  3757. y[i].qs[j] = MIN(127, v);
  3758. }
  3759. for (int j = 0; j < QK_K/16; ++j) {
  3760. int sum = 0;
  3761. for (int ii = 0; ii < 16; ++ii) {
  3762. sum += y[i].qs[j*16 + ii];
  3763. }
  3764. y[i].bsums[j] = sum;
  3765. }
  3766. y[i].d = 1/iscale;
  3767. x += QK_K;
  3768. }
  3769. }
  3770. void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
  3771. assert(k % QK_K == 0);
  3772. const int nb = k / QK_K;
  3773. for (int i = 0; i < nb; i++) {
  3774. for (int j = 0; j < QK_K; ++j) {
  3775. *y++ = x[i].d * x[i].qs[j];
  3776. }
  3777. }
  3778. }
  3779. void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
  3780. quantize_row_q8_K_reference(x, y, k);
  3781. }
3782. //===================================== Dot products =================================
  3783. //
  3784. // Helper functions
  3785. //
  3786. #if __AVX__ || __AVX2__ || __AVX512F__
  3787. // shuffles to pick the required scales in dot products
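// Each helper returns the i-th row of its table; used as the control operand of
// _mm256_shuffle_epi8 / _mm_shuffle_epi8 it replicates the selected scale bytes into the
// layout the K-quant dot products expect.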
  3788. static inline __m256i get_scale_shuffle_q3k(int i) {
  3789. static const uint8_t k_shuffle[128] = {
  3790. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  3791. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  3792. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  3793. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
  3794. };
  3795. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  3796. }
  3797. static inline __m256i get_scale_shuffle_k4(int i) {
  3798. static const uint8_t k_shuffle[256] = {
  3799. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
  3800. 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  3801. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
  3802. 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  3803. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
  3804. 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  3805. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
  3806. 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
  3807. };
  3808. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  3809. }
  3810. static inline __m128i get_scale_shuffle(int i) {
  3811. static const uint8_t k_shuffle[128] = {
  3812. 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
  3813. 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
  3814. 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
  3815. 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
  3816. 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
  3817. 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
  3818. 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
  3819. 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
  3820. };
  3821. return _mm_loadu_si128((const __m128i*)k_shuffle + i);
  3822. }
  3823. #endif
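// ggml_vec_dot_q4_0_q8_0: s = sum over blocks of d_x * d_y * dot(q_x - 8, q_y). The scalar
// branch at the bottom is the reference; the NEON/AVX2/AVX/SSSE3/RISC-V branches compute the
// same thing with SIMD, and the __ARM_FEATURE_MATMUL_INT8 path handles nrc == 2 by producing
// a 2x2 tile of dot products (two x rows against two y rows) in one pass.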
  3824. void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3825. const int qk = QK8_0;
  3826. const int nb = n / qk;
  3827. assert(n % qk == 0);
  3828. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3829. assert((nrc == 2) || (nrc == 1));
  3830. #else
  3831. assert(nrc == 1);
  3832. #endif
  3833. UNUSED(nrc);
  3834. UNUSED(bx);
  3835. UNUSED(by);
  3836. UNUSED(bs);
  3837. const block_q4_0 * restrict x = vx;
  3838. const block_q8_0 * restrict y = vy;
  3839. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3840. if (nrc == 2) {
  3841. const block_q4_0 * restrict vx0 = vx;
  3842. const block_q4_0 * restrict vx1 = vx + bx;
  3843. const block_q8_0 * restrict vy0 = vy;
  3844. const block_q8_0 * restrict vy1 = vy + by;
  3845. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3846. for (int i = 0; i < nb; i++) {
  3847. const block_q4_0 * restrict b_x0 = &vx0[i];
  3848. const block_q4_0 * restrict b_x1 = &vx1[i];
  3849. const block_q8_0 * restrict b_y0 = &vy0[i];
  3850. const block_q8_0 * restrict b_y1 = &vy1[i];
  3851. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3852. const int8x16_t s8b = vdupq_n_s8(0x8);
  3853. const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
  3854. const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
  3855. // 4-bit -> 8-bit
  3856. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3857. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3858. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3859. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3860. // sub 8
  3861. const int8x16_t x0_l = vsubq_s8(v0_0l, s8b);
  3862. const int8x16_t x0_h = vsubq_s8(v0_0h, s8b);
  3863. const int8x16_t x1_l = vsubq_s8(v0_1l, s8b);
  3864. const int8x16_t x1_h = vsubq_s8(v0_1h, s8b);
  3865. // load y
  3866. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  3867. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  3868. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  3869. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  3870. float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  3871. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  3872. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  3873. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  3874. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3875. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3876. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3877. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3878. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3879. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3880. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3881. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3882. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  3883. l1, r1)), l2, r2)), l3, r3))), scale);
  3884. }
  3885. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  3886. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  3887. vst1_f32(s, vget_low_f32(sumv2));
  3888. vst1_f32(s + bs, vget_high_f32(sumv2));
  3889. return;
  3890. }
  3891. #endif
  3892. #if defined(__ARM_NEON)
  3893. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3894. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3895. assert(nb % 2 == 0); // TODO: handle odd nb
  3896. for (int i = 0; i < nb; i += 2) {
  3897. const block_q4_0 * restrict x0 = &x[i + 0];
  3898. const block_q4_0 * restrict x1 = &x[i + 1];
  3899. const block_q8_0 * restrict y0 = &y[i + 0];
  3900. const block_q8_0 * restrict y1 = &y[i + 1];
  3901. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3902. const int8x16_t s8b = vdupq_n_s8(0x8);
  3903. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3904. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3905. // 4-bit -> 8-bit
  3906. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3907. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3908. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3909. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3910. // sub 8
  3911. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  3912. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  3913. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  3914. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  3915. // load y
  3916. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3917. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3918. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3919. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3920. // dot product into int32x4_t
  3921. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
  3922. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
  3923. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3924. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3925. }
  3926. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3927. #elif defined(__AVX2__)
  3928. // Initialize accumulator with zeros
  3929. __m256 acc = _mm256_setzero_ps();
  3930. // Main loop
  3931. for (int i = 0; i < nb; ++i) {
  3932. /* Compute combined scale for the block */
  3933. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3934. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3935. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  3936. const __m256i off = _mm256_set1_epi8( 8 );
  3937. qx = _mm256_sub_epi8( qx, off );
  3938. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3939. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  3940. /* Multiply q with scale and accumulate */
  3941. acc = _mm256_fmadd_ps( d, q, acc );
  3942. }
  3943. *s = hsum_float_8(acc);
  3944. #elif defined(__AVX__)
  3945. // Initialize accumulator with zeros
  3946. __m256 acc = _mm256_setzero_ps();
  3947. // Main loop
  3948. for (int i = 0; i < nb; ++i) {
  3949. // Compute combined scale for the block
  3950. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3951. const __m128i lowMask = _mm_set1_epi8(0xF);
  3952. const __m128i off = _mm_set1_epi8(8);
  3953. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  3954. __m128i bx_0 = _mm_and_si128(lowMask, tmp);
  3955. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  3956. bx_0 = _mm_sub_epi8(bx_0, off);
  3957. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3958. bx_0 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  3959. by_0 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3960. bx_0 = _mm_sub_epi8(bx_0, off);
  3961. const __m128i i32_1 = mul_sum_i8_pairs(bx_0, by_0);
  3962. // Convert int32_t to float
  3963. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  3964. // Apply the scale, and accumulate
  3965. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  3966. }
  3967. *s = hsum_float_8(acc);
  3968. #elif defined(__SSSE3__)
  3969. // set constants
  3970. const __m128i lowMask = _mm_set1_epi8(0xF);
  3971. const __m128i off = _mm_set1_epi8(8);
  3972. // Initialize accumulator with zeros
  3973. __m128 acc_0 = _mm_setzero_ps();
  3974. __m128 acc_1 = _mm_setzero_ps();
  3975. __m128 acc_2 = _mm_setzero_ps();
  3976. __m128 acc_3 = _mm_setzero_ps();
  3977. // First round without accumulation
  3978. {
  3979. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  3980. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
  3981. // Compute combined scale for the block 0 and 1
  3982. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  3983. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  3984. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3985. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  3986. bx_0 = _mm_sub_epi8(bx_0, off);
  3987. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3988. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3989. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  3990. bx_1 = _mm_sub_epi8(bx_1, off);
  3991. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3992. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  3993. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
  3994. // Compute combined scale for the block 2 and 3
  3995. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  3996. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  3997. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3998. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  3999. bx_2 = _mm_sub_epi8(bx_2, off);
  4000. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  4001. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  4002. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  4003. bx_3 = _mm_sub_epi8(bx_3, off);
  4004. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  4005. // Convert int32_t to float
  4006. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  4007. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  4008. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  4009. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  4010. // Apply the scale
  4011. acc_0 = _mm_mul_ps( d_0_1, p0 );
  4012. acc_1 = _mm_mul_ps( d_0_1, p1 );
  4013. acc_2 = _mm_mul_ps( d_2_3, p2 );
  4014. acc_3 = _mm_mul_ps( d_2_3, p3 );
  4015. }
  4016. assert(nb % 2 == 0); // TODO: handle odd nb
  4017. // Main loop
  4018. for (int i = 2; i < nb; i+=2) {
  4019. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  4020. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
  4021. // Compute combined scale for the block 0 and 1
  4022. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  4023. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  4024. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  4025. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  4026. bx_0 = _mm_sub_epi8(bx_0, off);
  4027. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  4028. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  4029. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  4030. bx_1 = _mm_sub_epi8(bx_1, off);
  4031. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  4032. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  4033. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
  4034. // Compute combined scale for the block 2 and 3
  4035. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  4036. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  4037. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  4038. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  4039. bx_2 = _mm_sub_epi8(bx_2, off);
  4040. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  4041. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  4042. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  4043. bx_3 = _mm_sub_epi8(bx_3, off);
  4044. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  4045. // Convert int32_t to float
  4046. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  4047. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  4048. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  4049. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  4050. // Apply the scale
  4051. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  4052. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  4053. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  4054. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
4055. // Accumulate
  4056. acc_0 = _mm_add_ps(p0_d, acc_0);
  4057. acc_1 = _mm_add_ps(p1_d, acc_1);
  4058. acc_2 = _mm_add_ps(p2_d, acc_2);
  4059. acc_3 = _mm_add_ps(p3_d, acc_3);
  4060. }
  4061. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  4062. #elif defined(__riscv_v_intrinsic)
  4063. float sumf = 0.0;
  4064. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  4065. for (int i = 0; i < nb; i++) {
  4066. // load elements
  4067. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  4068. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  4069. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
4070. // extract the lower nibbles of x, then the upper nibbles
  4071. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  4072. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  4073. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  4074. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  4075. // subtract offset
  4076. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
  4077. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
  4078. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  4079. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  4080. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4081. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  4082. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  4083. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  4084. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  4085. }
  4086. *s = sumf;
  4087. #else
  4088. // scalar
  4089. float sumf = 0.0;
  4090. for (int i = 0; i < nb; i++) {
  4091. int sumi = 0;
  4092. for (int j = 0; j < qk/2; ++j) {
  4093. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  4094. const int v1 = (x[i].qs[j] >> 4) - 8;
  4095. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  4096. }
  4097. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  4098. }
  4099. *s = sumf;
  4100. #endif
  4101. }
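// ggml_vec_dot_q4_1_q8_1: same structure as above but for the asymmetric 4-bit format; per
// block the result is d_x * d_y * dot(q_x, q_y) + m_x * y[i].s, where y[i].s is the sum term
// stored in the Q8_1 block (see the scalar branch at the end).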
  4102. void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4103. const int qk = QK8_1;
  4104. const int nb = n / qk;
  4105. assert(n % qk == 0);
  4106. #if defined(__ARM_FEATURE_MATMUL_INT8)
  4107. assert((nrc == 2) || (nrc == 1));
  4108. #else
  4109. assert(nrc == 1);
  4110. #endif
  4111. UNUSED(nrc);
  4112. UNUSED(bx);
  4113. UNUSED(by);
  4114. UNUSED(bs);
  4115. const block_q4_1 * restrict x = vx;
  4116. const block_q8_1 * restrict y = vy;
  4117. #if defined(__ARM_FEATURE_MATMUL_INT8)
  4118. if (nrc == 2) {
  4119. const block_q4_1 * restrict vx0 = vx;
  4120. const block_q4_1 * restrict vx1 = vx + bx;
  4121. const block_q8_1 * restrict vy0 = vy;
  4122. const block_q8_1 * restrict vy1 = vy + by;
  4123. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4124. float32x4_t summs0 = vdupq_n_f32(0.0f);
  4125. for (int i = 0; i < nb; i++) {
  4126. const block_q4_1 * restrict b_x0 = &vx0[i];
  4127. const block_q4_1 * restrict b_x1 = &vx1[i];
  4128. const block_q8_1 * restrict b_y0 = &vy0[i];
  4129. const block_q8_1 * restrict b_y1 = &vy1[i];
  4130. float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * b_y0->s,
  4131. GGML_FP16_TO_FP32(b_x1->m) * b_y0->s,
  4132. GGML_FP16_TO_FP32(b_x0->m) * b_y1->s,
  4133. GGML_FP16_TO_FP32(b_x1->m) * b_y1->s};
  4134. summs0 += summs_t;
  4135. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  4136. const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
  4137. const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
  4138. // 4-bit -> 8-bit
  4139. const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  4140. const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  4141. const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  4142. const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  4143. // load y
  4144. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  4145. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  4146. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  4147. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  4148. // mmla into int32x4_t
  4149. float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  4150. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  4151. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  4152. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  4153. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4154. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4155. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4156. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4157. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4158. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4159. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  4160. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  4161. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  4162. l1, r1)), l2, r2)), l3, r3))), scale);
  4163. }
  4164. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  4165. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  4166. sumv2 = sumv2 + summs0;
  4167. vst1_f32(s, vget_low_f32(sumv2));
  4168. vst1_f32(s + bs, vget_high_f32(sumv2));
  4169. return;
  4170. }
  4171. #endif
  4172. // TODO: add WASM SIMD
  4173. #if defined(__ARM_NEON)
  4174. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4175. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  4176. float summs = 0;
  4177. assert(nb % 2 == 0); // TODO: handle odd nb
  4178. for (int i = 0; i < nb; i += 2) {
  4179. const block_q4_1 * restrict x0 = &x[i + 0];
  4180. const block_q4_1 * restrict x1 = &x[i + 1];
  4181. const block_q8_1 * restrict y0 = &y[i + 0];
  4182. const block_q8_1 * restrict y1 = &y[i + 1];
  4183. summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
  4184. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  4185. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  4186. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  4187. // 4-bit -> 8-bit
  4188. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  4189. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  4190. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  4191. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  4192. // load y
  4193. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  4194. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  4195. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  4196. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  4197. // dot product into int32x4_t
  4198. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
  4199. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
  4200. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
  4201. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
  4202. }
  4203. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
  4204. #elif defined(__AVX2__) || defined(__AVX__)
  4205. // Initialize accumulator with zeros
  4206. __m256 acc = _mm256_setzero_ps();
  4207. float summs = 0;
  4208. // Main loop
  4209. for (int i = 0; i < nb; ++i) {
  4210. const float d0 = GGML_FP16_TO_FP32(x[i].d);
  4211. const float d1 = y[i].d;
  4212. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  4213. const __m256 d0v = _mm256_set1_ps( d0 );
  4214. const __m256 d1v = _mm256_set1_ps( d1 );
  4215. // Compute combined scales
  4216. const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
  4217. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  4218. const __m256i qx = bytes_from_nibbles_32(x[i].qs);
  4219. const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[i].qs );
  4220. const __m256 xy = mul_sum_us8_pairs_float(qx, qy);
  4221. // Accumulate d0*d1*x*y
  4222. #if defined(__AVX2__)
  4223. acc = _mm256_fmadd_ps( d0d1, xy, acc );
  4224. #else
  4225. acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
  4226. #endif
  4227. }
  4228. *s = hsum_float_8(acc) + summs;
  4229. #elif defined(__riscv_v_intrinsic)
  4230. float sumf = 0.0;
  4231. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  4232. for (int i = 0; i < nb; i++) {
  4233. // load elements
  4234. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  4235. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  4236. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
4237. // extract the lower nibbles of x, then the upper nibbles
  4238. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  4239. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  4240. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  4241. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  4242. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  4243. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  4244. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4245. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  4246. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  4247. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  4248. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  4249. }
  4250. *s = sumf;
  4251. #else
  4252. // scalar
  4253. float sumf = 0.0;
  4254. for (int i = 0; i < nb; i++) {
  4255. int sumi = 0;
  4256. for (int j = 0; j < qk/2; ++j) {
  4257. const int v0 = (x[i].qs[j] & 0x0F);
  4258. const int v1 = (x[i].qs[j] >> 4);
  4259. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  4260. }
  4261. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  4262. }
  4263. *s = sumf;
  4264. #endif
  4265. }
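// ggml_vec_dot_q5_0_q8_0: the 4-bit nibbles are extended by one high bit from qh and offset
// by -16, giving values in [-16, 15]. The NEON and WASM paths expand the 32 qh bits to bytes
// via the table_b2b_* lookup tables; the scalar branch at the end shows the reference math.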
  4266. void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4267. const int qk = QK8_0;
  4268. const int nb = n / qk;
  4269. assert(n % qk == 0);
  4270. assert(qk == QK5_0);
  4271. assert(nrc == 1);
  4272. UNUSED(nrc);
  4273. UNUSED(bx);
  4274. UNUSED(by);
  4275. UNUSED(bs);
  4276. const block_q5_0 * restrict x = vx;
  4277. const block_q8_0 * restrict y = vy;
  4278. #if defined(__ARM_NEON)
  4279. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4280. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  4281. uint32_t qh0;
  4282. uint32_t qh1;
  4283. uint64_t tmp0[4];
  4284. uint64_t tmp1[4];
  4285. assert(nb % 2 == 0); // TODO: handle odd nb
  4286. for (int i = 0; i < nb; i += 2) {
  4287. const block_q5_0 * restrict x0 = &x[i];
  4288. const block_q5_0 * restrict x1 = &x[i + 1];
  4289. const block_q8_0 * restrict y0 = &y[i];
  4290. const block_q8_0 * restrict y1 = &y[i + 1];
  4291. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  4292. // extract the 5th bit via lookup table ((!b) << 4)
  4293. memcpy(&qh0, x0->qh, sizeof(qh0));
  4294. memcpy(&qh1, x1->qh, sizeof(qh1));
  4295. tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
  4296. tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
  4297. tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
  4298. tmp0[3] = table_b2b_1[(qh0 >> 24) ];
  4299. tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
  4300. tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
  4301. tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
  4302. tmp1[3] = table_b2b_1[(qh1 >> 24) ];
  4303. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  4304. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  4305. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  4306. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  4307. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  4308. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  4309. // 4-bit -> 8-bit
  4310. int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  4311. int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  4312. int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  4313. int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  4314. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  4315. const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
  4316. const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
  4317. const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
  4318. const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
  4319. // load y
  4320. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  4321. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  4322. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  4323. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  4324. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  4325. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  4326. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  4327. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  4328. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  4329. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  4330. }
  4331. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  4332. #elif defined(__wasm_simd128__)
  4333. v128_t sumv = wasm_f32x4_splat(0.0f);
  4334. uint32_t qh;
  4335. uint64_t tmp[4];
  4336. // TODO: check if unrolling this is better
  4337. for (int i = 0; i < nb; ++i) {
  4338. const block_q5_0 * restrict x0 = &x[i];
  4339. const block_q8_0 * restrict y0 = &y[i];
  4340. const v128_t m4b = wasm_i8x16_splat(0x0F);
  4341. // extract the 5th bit
  4342. memcpy(&qh, x0->qh, sizeof(qh));
  4343. tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
  4344. tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
  4345. tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
  4346. tmp[3] = table_b2b_1[(qh >> 24) ];
  4347. const v128_t qhl = wasm_v128_load(tmp + 0);
  4348. const v128_t qhh = wasm_v128_load(tmp + 2);
  4349. const v128_t v0 = wasm_v128_load(x0->qs);
  4350. // 4-bit -> 8-bit
  4351. const v128_t v0l = wasm_v128_and (v0, m4b);
  4352. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  4353. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  4354. const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
  4355. const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
  4356. // load y
  4357. const v128_t v1l = wasm_v128_load(y0->qs);
  4358. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  4359. // int8x16 -> int16x8
  4360. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  4361. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  4362. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  4363. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  4364. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  4365. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  4366. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  4367. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  4368. // dot product
  4369. sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
  4370. wasm_i32x4_add(
  4371. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  4372. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  4373. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  4374. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  4375. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  4376. }
  4377. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  4378. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
  4379. #elif defined(__AVX2__)
  4380. // Initialize accumulator with zeros
  4381. __m256 acc = _mm256_setzero_ps();
  4382. // Main loop
  4383. for (int i = 0; i < nb; i++) {
  4384. /* Compute combined scale for the block */
  4385. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  4386. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  4387. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  4388. bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
  4389. qx = _mm256_or_si256(qx, bxhi);
  4390. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4391. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  4392. /* Multiply q with scale and accumulate */
  4393. acc = _mm256_fmadd_ps(d, q, acc);
  4394. }
  4395. *s = hsum_float_8(acc);
  4396. #elif defined(__AVX__)
  4397. // Initialize accumulator with zeros
  4398. __m256 acc = _mm256_setzero_ps();
  4399. __m128i mask = _mm_set1_epi8((char)0xF0);
  4400. // Main loop
  4401. for (int i = 0; i < nb; i++) {
  4402. /* Compute combined scale for the block */
  4403. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  4404. __m256i bx_0 = bytes_from_nibbles_32(x[i].qs);
  4405. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  4406. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  4407. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  4408. bxhil = _mm_andnot_si128(bxhil, mask);
  4409. bxhih = _mm_andnot_si128(bxhih, mask);
  4410. __m128i bxl = _mm256_castsi256_si128(bx_0);
  4411. __m128i bxh = _mm256_extractf128_si256(bx_0, 1);
  4412. bxl = _mm_or_si128(bxl, bxhil);
  4413. bxh = _mm_or_si128(bxh, bxhih);
  4414. bx_0 = MM256_SET_M128I(bxh, bxl);
  4415. const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4416. const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0);
  4417. /* Multiply q with scale and accumulate */
  4418. acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
  4419. }
  4420. *s = hsum_float_8(acc);
  4421. #elif defined(__riscv_v_intrinsic)
  4422. float sumf = 0.0;
  4423. uint32_t qh;
  4424. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  4425. // These temporary registers are for masking and shift operations
  4426. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  4427. vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
  4428. vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
  4429. vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  4430. for (int i = 0; i < nb; i++) {
  4431. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  4432. // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  4433. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
  4434. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
  4435. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  4436. // ((qh & (1u << (j + 16))) >> (j + 12));
  4437. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
  4438. vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
  4439. // narrowing
  4440. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
  4441. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  4442. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
  4443. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  4444. // load
  4445. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  4446. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  4447. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  4448. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  4449. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  4450. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  4451. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  4452. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  4453. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  4454. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
  4455. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
  4456. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  4457. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  4458. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4459. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  4460. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  4461. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  4462. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  4463. }
  4464. *s = sumf;
  4465. #else
  4466. // scalar
  4467. float sumf = 0.0;
  4468. for (int i = 0; i < nb; i++) {
  4469. uint32_t qh;
  4470. memcpy(&qh, x[i].qh, sizeof(qh));
  4471. int sumi = 0;
  4472. for (int j = 0; j < qk/2; ++j) {
  4473. const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  4474. const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
  4475. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  4476. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  4477. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  4478. }
  4479. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  4480. }
  4481. *s = sumf;
  4482. #endif
  4483. }
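// ggml_vec_dot_q5_1_q8_1: as q5_0 but asymmetric; the high bit is OR-ed in without the -16
// offset and each block contributes an extra m_x * y[i].s term, mirroring q4_1.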
  4484. void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4485. const int qk = QK8_1;
  4486. const int nb = n / qk;
  4487. assert(n % qk == 0);
  4488. assert(qk == QK5_1);
  4489. assert(nrc == 1);
  4490. UNUSED(nrc);
  4491. UNUSED(bx);
  4492. UNUSED(by);
  4493. UNUSED(bs);
  4494. const block_q5_1 * restrict x = vx;
  4495. const block_q8_1 * restrict y = vy;
  4496. #if defined(__ARM_NEON)
  4497. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4498. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  4499. float summs0 = 0.0f;
  4500. float summs1 = 0.0f;
  4501. uint32_t qh0;
  4502. uint32_t qh1;
  4503. uint64_t tmp0[4];
  4504. uint64_t tmp1[4];
  4505. assert(nb % 2 == 0); // TODO: handle odd nb
  4506. for (int i = 0; i < nb; i += 2) {
  4507. const block_q5_1 * restrict x0 = &x[i];
  4508. const block_q5_1 * restrict x1 = &x[i + 1];
  4509. const block_q8_1 * restrict y0 = &y[i];
  4510. const block_q8_1 * restrict y1 = &y[i + 1];
  4511. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  4512. summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
  4513. summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
  4514. // extract the 5th bit via lookup table ((b) << 4)
  4515. memcpy(&qh0, x0->qh, sizeof(qh0));
  4516. memcpy(&qh1, x1->qh, sizeof(qh1));
  4517. tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
  4518. tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
  4519. tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
  4520. tmp0[3] = table_b2b_0[(qh0 >> 24) ];
  4521. tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
  4522. tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
  4523. tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
  4524. tmp1[3] = table_b2b_0[(qh1 >> 24) ];
  4525. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  4526. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  4527. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  4528. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  4529. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  4530. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  4531. // 4-bit -> 8-bit
  4532. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  4533. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  4534. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  4535. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  4536. // add high bit
  4537. const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
  4538. const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
  4539. const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
  4540. const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
  4541. // load y
  4542. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  4543. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  4544. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  4545. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  4546. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  4547. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  4548. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
  4549. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  4550. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  4551. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
  4552. }
  4553. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
  4554. #elif defined(__wasm_simd128__)
  4555. v128_t sumv = wasm_f32x4_splat(0.0f);
  4556. float summs = 0.0f;
  4557. uint32_t qh;
  4558. uint64_t tmp[4];
  4559. // TODO: check if unrolling this is better
  4560. for (int i = 0; i < nb; ++i) {
  4561. const block_q5_1 * restrict x0 = &x[i];
  4562. const block_q8_1 * restrict y0 = &y[i];
  4563. summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
  4564. const v128_t m4b = wasm_i8x16_splat(0x0F);
  4565. // extract the 5th bit
  4566. memcpy(&qh, x0->qh, sizeof(qh));
  4567. tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
  4568. tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
  4569. tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
  4570. tmp[3] = table_b2b_0[(qh >> 24) ];
  4571. const v128_t qhl = wasm_v128_load(tmp + 0);
  4572. const v128_t qhh = wasm_v128_load(tmp + 2);
  4573. const v128_t v0 = wasm_v128_load(x0->qs);
  4574. // 4-bit -> 8-bit
  4575. const v128_t v0l = wasm_v128_and (v0, m4b);
  4576. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  4577. // add high bit
  4578. const v128_t v0lf = wasm_v128_or(v0l, qhl);
  4579. const v128_t v0hf = wasm_v128_or(v0h, qhh);
  4580. // load y
  4581. const v128_t v1l = wasm_v128_load(y0->qs);
  4582. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  4583. // int8x16 -> int16x8
  4584. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  4585. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  4586. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  4587. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  4588. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  4589. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  4590. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  4591. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  4592. // dot product
  4593. sumv = wasm_f32x4_add(sumv,
  4594. wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
  4595. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  4596. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  4597. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  4598. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  4599. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
  4600. }
  4601. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  4602. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
  4603. #elif defined(__AVX2__)
  4604. // Initialize accumulator with zeros
  4605. __m256 acc = _mm256_setzero_ps();
  4606. float summs = 0.0f;
  4607. // Main loop
  4608. for (int i = 0; i < nb; i++) {
  4609. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  4610. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  4611. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  4612. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  4613. bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
  4614. qx = _mm256_or_si256(qx, bxhi);
  4615. const __m256 dy = _mm256_set1_ps(y[i].d);
  4616. const __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4617. const __m256 q = mul_sum_us8_pairs_float(qx, qy);
  4618. acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
  4619. }
  4620. *s = hsum_float_8(acc) + summs;
  4621. #elif defined(__AVX__)
  4622. // Initialize accumulator with zeros
  4623. __m256 acc = _mm256_setzero_ps();
  4624. __m128i mask = _mm_set1_epi8(0x10);
  4625. float summs = 0.0f;
  4626. // Main loop
  4627. for (int i = 0; i < nb; i++) {
  4628. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  4629. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  4630. __m256i bx_0 = bytes_from_nibbles_32(x[i].qs);
  4631. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  4632. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  4633. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  4634. bxhil = _mm_and_si128(bxhil, mask);
  4635. bxhih = _mm_and_si128(bxhih, mask);
  4636. __m128i bxl = _mm256_castsi256_si128(bx_0);
  4637. __m128i bxh = _mm256_extractf128_si256(bx_0, 1);
  4638. bxl = _mm_or_si128(bxl, bxhil);
  4639. bxh = _mm_or_si128(bxh, bxhih);
  4640. bx_0 = MM256_SET_M128I(bxh, bxl);
  4641. const __m256 dy = _mm256_set1_ps(y[i].d);
  4642. const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4643. const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0);
  4644. acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
  4645. }
  4646. *s = hsum_float_8(acc) + summs;
  4647. #elif defined(__riscv_v_intrinsic)
  4648. float sumf = 0.0;
  4649. uint32_t qh;
  4650. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  4651. // temporary registers for shift operations
  4652. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  4653. vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  4654. for (int i = 0; i < nb; i++) {
  4655. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  4656. // load qh
  4657. vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
  4658. // ((qh >> (j + 0)) << 4) & 0x10;
  4659. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
  4660. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  4661. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
  4662. // ((qh >> (j + 12)) ) & 0x10;
  4663. vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
  4664. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
  4665. // narrowing
  4666. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
  4667. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  4668. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
  4669. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  4670. // load
  4671. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  4672. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  4673. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  4674. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  4675. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  4676. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  4677. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  4678. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  4679. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  4680. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  4681. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  4682. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4683. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  4684. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  4685. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  4686. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  4687. }
  4688. *s = sumf;
  4689. #else
  4690. // scalar
  4691. float sumf = 0.0;
  4692. for (int i = 0; i < nb; i++) {
  4693. uint32_t qh;
  4694. memcpy(&qh, x[i].qh, sizeof(qh));
  4695. int sumi = 0;
  4696. for (int j = 0; j < qk/2; ++j) {
  4697. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  4698. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  4699. const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
  4700. const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
  4701. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  4702. }
  4703. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  4704. }
  4705. *s = sumf;
  4706. #endif
  4707. }
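// ggml_vec_dot_q8_0_q8_0: both operands are already 8-bit, so each block is a plain int8 dot
// product scaled by d_x * d_y; the __ARM_FEATURE_MATMUL_INT8 path again produces a 2x2 tile
// for nrc == 2.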
  4708. void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4709. const int qk = QK8_0;
  4710. const int nb = n / qk;
  4711. assert(n % qk == 0);
  4712. #if defined(__ARM_FEATURE_MATMUL_INT8)
  4713. assert((nrc == 2) || (nrc == 1));
  4714. #else
  4715. assert(nrc == 1);
  4716. #endif
  4717. UNUSED(nrc);
  4718. UNUSED(bx);
  4719. UNUSED(by);
  4720. UNUSED(bs);
  4721. const block_q8_0 * restrict x = vx;
  4722. const block_q8_0 * restrict y = vy;
  4723. #if defined(__ARM_FEATURE_MATMUL_INT8)
  4724. if (nrc == 2) {
  4725. const block_q8_0 * restrict vx0 = vx;
  4726. const block_q8_0 * restrict vx1 = vx + bx;
  4727. const block_q8_0 * restrict vy0 = vy;
  4728. const block_q8_0 * restrict vy1 = vy + by;
  4729. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4730. for (int i = 0; i < nb; i++) {
  4731. const block_q8_0 * restrict b_x0 = &vx0[i];
  4732. const block_q8_0 * restrict b_y0 = &vy0[i];
  4733. const block_q8_0 * restrict b_x1 = &vx1[i];
  4734. const block_q8_0 * restrict b_y1 = &vy1[i];
  4735. const int8x16_t x0_l = vld1q_s8(b_x0->qs);
  4736. const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
  4737. const int8x16_t x1_l = vld1q_s8(b_x1->qs);
  4738. const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);
  4739. // load y
  4740. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  4741. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  4742. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  4743. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  4744. float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  4745. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  4746. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  4747. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  4748. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4749. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4750. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4751. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4752. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4753. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4754. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  4755. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
            sumv0 = vmlaq_f32(sumv0, vcvtq_f32_s32(
                        vmmlaq_s32(vmmlaq_s32(vmmlaq_s32(vmmlaq_s32(
                            vdupq_n_s32(0), l0, r0), l1, r1), l2, r2), l3, r3)), scale);
        }

        float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
        float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);

        vst1_f32(s,      vget_low_f32 (sumv2));
        vst1_f32(s + bs, vget_high_f32(sumv2));

        return;
    }
#endif
  4766. #if defined(__ARM_NEON)
  4767. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4768. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  4769. assert(nb % 2 == 0); // TODO: handle odd nb
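    // two blocks per iteration, one accumulated into sumv0 and one into sumv1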
  4770. for (int i = 0; i < nb; i += 2) {
  4771. const block_q8_0 * restrict x0 = &x[i + 0];
  4772. const block_q8_0 * restrict x1 = &x[i + 1];
  4773. const block_q8_0 * restrict y0 = &y[i + 0];
  4774. const block_q8_0 * restrict y1 = &y[i + 1];
  4775. const int8x16_t x0_0 = vld1q_s8(x0->qs);
  4776. const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
  4777. const int8x16_t x1_0 = vld1q_s8(x1->qs);
  4778. const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
  4779. // load y
  4780. const int8x16_t y0_0 = vld1q_s8(y0->qs);
  4781. const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
  4782. const int8x16_t y1_0 = vld1q_s8(y1->qs);
  4783. const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
  4784. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  4785. ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
  4786. ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  4787. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  4788. ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
  4789. ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  4790. }
  4791. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  4792. #elif defined(__AVX2__) || defined(__AVX__)
  4793. // Initialize accumulator with zeros
  4794. __m256 acc = _mm256_setzero_ps();
  4795. // Main loop
  4796. for (int i = 0; i < nb; ++i) {
  4797. // Compute combined scale for the block
  4798. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  4799. __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs);
  4800. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4801. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  4802. // Multiply q with scale and accumulate
  4803. #if defined(__AVX2__)
  4804. acc = _mm256_fmadd_ps( d, q, acc );
  4805. #else
  4806. acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
  4807. #endif
  4808. }
  4809. *s = hsum_float_8(acc);
  4810. #elif defined(__riscv_v_intrinsic)
  4811. float sumf = 0.0;
  4812. size_t vl = __riscv_vsetvl_e8m1(qk);
  4813. for (int i = 0; i < nb; i++) {
  4814. // load elements
  4815. vint8m1_t bx_0 = __riscv_vle8_v_i8m1(x[i].qs, vl);
  4816. vint8m1_t by_0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  4817. vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx_0, by_0, vl);
  4818. vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4819. vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
  4820. int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
  4821. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  4822. }
  4823. *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk; j++) {
            sumi += x[i].qs[j]*y[i].qs[j];
        }

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#endif
}
  4837. #if QK_K == 256
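
// Dot product of a Q2_K row with a Q8_K row (QK_K == 256). Each super-block carries 16 scale bytes:
// the low nibble scales the 2-bit quants of one 16-element sub-block, the high nibble is that
// sub-block's min, folded in through y->bsums. Per super-block the result is
// d*sum(scale*(q2.q8)) - dmin*sum(min*bsums), as in the scalar fallback below.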
void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q2_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

  4847. #ifdef __ARM_NEON
  4848. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4849. const uint8x16_t m4 = vdupq_n_u8(0xF);
  4850. const int32x4_t vzero = vdupq_n_s32(0);
  4851. ggml_int8x16x2_t q2bytes;
  4852. uint8_t aux[16];
  4853. float sum = 0;
  4854. for (int i = 0; i < nb; ++i) {
  4855. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4856. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4857. const uint8_t * restrict q2 = x[i].qs;
  4858. const int8_t * restrict q8 = y[i].qs;
  4859. const uint8_t * restrict sc = x[i].scales;
  4860. const uint8x16_t mins_and_scales = vld1q_u8(sc);
  4861. const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
  4862. vst1q_u8(aux, scales);
  4863. const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
  4864. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  4865. const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
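        // multiply the per-sub-block mins with the Q8 block sums; dmin was negated above,
        // so this subtracts the min contribution from the running sum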
  4866. const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
  4867. vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
  4868. const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
  4869. vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
  4870. sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
  4871. int isum = 0;
  4872. int is = 0;
        // We use a macro instead of a function call because, for some reason, the code runs
        // 2-3% slower with a function call, even when the function is declared inline.
#define MULTIPLY_ACCUM_WITH_SCALE(index)\
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];

#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
        q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
        q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
        q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
        MULTIPLY_ACCUM_WITH_SCALE((index));
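        // each pass over 128 quants consumes 32 bytes of q2 and processes the four 2-bit planes (shifts 0, 2, 4, 6)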
  4883. for (int j = 0; j < QK_K/128; ++j) {
  4884. const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
  4885. ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  4886. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
  4887. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
  4888. MULTIPLY_ACCUM_WITH_SCALE(0);
  4889. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
  4890. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
  4891. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
  4892. is += 8;
  4893. }
  4894. sum += d * isum;
  4895. }
  4896. *s = sum;
  4897. #elif defined __AVX2__
  4898. const __m256i m3 = _mm256_set1_epi8(3);
  4899. const __m128i m4 = _mm_set1_epi8(0xF);
  4900. __m256 acc = _mm256_setzero_ps();
  4901. for (int i = 0; i < nb; ++i) {
  4902. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4903. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4904. const uint8_t * restrict q2 = x[i].qs;
  4905. const int8_t * restrict q8 = y[i].qs;
  4906. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4907. const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
  4908. const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4909. const __m256i mins = _mm256_cvtepi8_epi16(mins8);
  4910. const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
  4911. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
  4912. const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
  4913. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4914. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4915. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
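        // duplicate each 128-bit half of all_scales so the 16-bit sub-block scales can be
        // selected with a byte shuffle inside the loop below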
  4916. __m256i sumi = _mm256_setzero_si256();
  4917. for (int j = 0; j < QK_K/128; ++j) {
  4918. const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
  4919. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4920. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4921. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4922. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4923. const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
  4924. const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
  4925. const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
  4926. const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
  4927. __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4928. __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4929. __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
  4930. __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
  4931. p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
  4932. p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
  4933. p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
  4934. p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
  4935. p0 = _mm256_add_epi32(p0, p1);
  4936. p2 = _mm256_add_epi32(p2, p3);
  4937. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
  4938. }
  4939. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4940. }
  4941. *s = hsum_float_8(acc);
  4942. #elif defined __AVX__
  4943. const __m128i m3 = _mm_set1_epi8(0x3);
  4944. const __m128i m4 = _mm_set1_epi8(0xF);
  4945. const __m128i m2 = _mm_set1_epi8(0x2);
  4946. __m256 acc = _mm256_setzero_ps();
  4947. for (int i = 0; i < nb; ++i) {
  4948. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4949. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4950. const uint8_t * restrict q2 = x[i].qs;
  4951. const int8_t * restrict q8 = y[i].qs;
  4952. // load mins and scales from block_q2_K.scales[QK_K/16]
  4953. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4954. const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
  4955. const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4956. const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
  4957. const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
  4958. // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
  4959. const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
  4960. const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
  4961. // sumf += -dmin * summs in 32bits*8
  4962. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
  4963. const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
  4964. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
  4965. const __m128i scales[2] = { scales_0, scales_1 };
  4966. __m128i sumi_0 = _mm_setzero_si128();
  4967. __m128i sumi_1 = _mm_setzero_si128();
  4968. for (int j = 0; j < QK_K/128; ++j) {
  4969. // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
  4970. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4971. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4972. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4973. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4974. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4975. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4976. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4977. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4978. // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
  4979. __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4980. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4981. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4982. const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4983. const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4984. q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4985. const __m128i q2_1 = _mm_and_si128(q2bits, m3);
  4986. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4987. const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4988. const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4989. // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
  4990. __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
  4991. __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
  4992. __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
  4993. __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
  4994. __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
  4995. __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
  4996. __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
  4997. __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
  4998. // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
  4999. __m128i shuffle = _mm_set1_epi16(0x0100);
  5000. p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
  5001. shuffle = _mm_add_epi16(shuffle, m2);
  5002. p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
  5003. shuffle = _mm_add_epi16(shuffle, m2);
  5004. p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
  5005. shuffle = _mm_add_epi16(shuffle, m2);
  5006. p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
  5007. shuffle = _mm_add_epi16(shuffle, m2);
  5008. p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
  5009. shuffle = _mm_add_epi16(shuffle, m2);
  5010. p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
  5011. shuffle = _mm_add_epi16(shuffle, m2);
  5012. p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
  5013. shuffle = _mm_add_epi16(shuffle, m2);
  5014. p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
  5015. p0 = _mm_add_epi32(p0, p1);
  5016. p2 = _mm_add_epi32(p2, p3);
  5017. p4 = _mm_add_epi32(p4, p5);
  5018. p6 = _mm_add_epi32(p6, p7);
  5019. // isum in 32bits*4*2
  5020. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
  5021. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
  5022. }
  5023. // sumf += dall * isum - dmin * summs in 32bits
  5024. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  5025. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
  5026. }
  5027. *s = hsum_float_8(acc);
  5028. #elif defined __riscv_v_intrinsic
  5029. float sumf = 0;
  5030. uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  5031. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  5032. for (int i = 0; i < nb; ++i) {
  5033. const uint8_t * q2 = x[i].qs;
  5034. const int8_t * q8 = y[i].qs;
  5035. const uint8_t * sc = x[i].scales;
  5036. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5037. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5038. size_t vl = 16;
  5039. vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
  5040. vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
  5041. vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
  5042. vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
  5043. vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
  5044. vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
  5045. vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
  5046. vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  5047. sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
  5048. vl = 32;
  5049. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5050. vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
  5051. uint8_t is=0;
  5052. int isum=0;
  5053. for (int j = 0; j < QK_K/128; ++j) {
  5054. // load Q2
  5055. vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
  5056. vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
  5057. vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
  5058. vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
  5059. vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
  5060. // duplicate scale elements for product
  5061. vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
  5062. vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
  5063. vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
  5064. vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
  5065. vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
  5066. vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
  5067. vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
  5068. vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
  5069. // load Q8
  5070. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  5071. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  5072. vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
  5073. vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
  5074. vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
  5075. vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
  5076. vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
  5077. vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
  5078. vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
  5079. vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
  5080. isum += __riscv_vmv_x_s_i32m1_i32(isum1);
  5081. q2+=32; q8+=128; is=8;
  5082. }
  5083. sumf += dall * isum;
  5084. }
  5085. *s = sumf;
#else
    float sumf = 0;

    for (int i = 0; i < nb; ++i) {
        const uint8_t * q2 = x[i].qs;
        const  int8_t * q8 = y[i].qs;
        const uint8_t * sc = x[i].scales;

        int summs = 0;
        for (int j = 0; j < 16; ++j) {
            summs += y[i].bsums[j] * (sc[j] >> 4);
        }

        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        int isum = 0;
        int is = 0;
        int d;
        for (int k = 0; k < QK_K/128; ++k) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {
                d = sc[is++] & 0xF;
                int isuml = 0;
                for (int l =  0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
                isum += d * isuml;
                d = sc[is++] & 0xF;
                isuml = 0;
                for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
                isum += d * isuml;
                shift += 2;
                q8 += 32;
            }
            q2 += 32;
        }
        sumf += dall * isum - dmin * summs;
    }
    *s = sumf;
#endif
}
  5122. #else
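// QK_K != 256: the same dot product for the smaller super-block size (the QK_K == 64 build).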
  5123. void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  5124. assert(nrc == 1);
  5125. UNUSED(nrc);
  5126. UNUSED(bx);
  5127. UNUSED(by);
  5128. UNUSED(bs);
  5129. const block_q2_K * restrict x = vx;
  5130. const block_q8_K * restrict y = vy;
  5131. const int nb = n / QK_K;
  5132. #ifdef __ARM_NEON
  5133. const uint8x16_t m3 = vdupq_n_u8(0x3);
  5134. const int32x4_t vzero = vdupq_n_s32(0);
  5135. ggml_int8x16x4_t q2bytes;
  5136. uint32_t aux32[2];
  5137. const uint8_t * scales = (const uint8_t *)aux32;
  5138. float sum = 0;
  5139. for (int i = 0; i < nb; ++i) {
  5140. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5141. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5142. const uint8_t * restrict q2 = x[i].qs;
  5143. const int8_t * restrict q8 = y[i].qs;
  5144. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  5145. aux32[0] = sc[0] & 0x0f0f0f0f;
  5146. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  5147. sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  5148. int isum1 = 0, isum2 = 0;
  5149. const uint8x16_t q2bits = vld1q_u8(q2);
  5150. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  5151. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
  5152. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
  5153. q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
  5154. q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
  5155. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
  5156. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
  5157. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
  5158. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
  5159. sum += d * (isum1 + isum2);
  5160. }
  5161. *s = sum;
  5162. #elif defined __AVX2__
  5163. const __m256i m3 = _mm256_set1_epi8(3);
  5164. __m256 acc = _mm256_setzero_ps();
  5165. uint32_t ud, um;
  5166. const uint8_t * restrict db = (const uint8_t *)&ud;
  5167. const uint8_t * restrict mb = (const uint8_t *)&um;
  5168. float summs = 0;
  5169. // TODO: optimize this
  5170. for (int i = 0; i < nb; ++i) {
  5171. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5172. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5173. const uint8_t * restrict q2 = x[i].qs;
  5174. const int8_t * restrict q8 = y[i].qs;
  5175. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  5176. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  5177. um = (sc[0] >> 4) & 0x0f0f0f0f;
  5178. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  5179. summs += dmin * smin;
  5180. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  5181. const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
  5182. const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
  5183. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5184. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  5185. const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  5186. const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  5187. const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
  5188. const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
  5189. const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
  5190. const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
  5191. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
  5192. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
  5193. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
  5194. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
  5195. }
  5196. *s = hsum_float_8(acc) + summs;
  5197. #elif defined __AVX__
  5198. const __m128i m3 = _mm_set1_epi8(3);
  5199. __m256 acc = _mm256_setzero_ps();
  5200. uint32_t ud, um;
  5201. const uint8_t * restrict db = (const uint8_t *)&ud;
  5202. const uint8_t * restrict mb = (const uint8_t *)&um;
  5203. float summs = 0;
  5204. // TODO: optimize this
  5205. for (int i = 0; i < nb; ++i) {
  5206. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5207. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5208. const uint8_t * restrict q2 = x[i].qs;
  5209. const int8_t * restrict q8 = y[i].qs;
  5210. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  5211. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  5212. um = (sc[0] >> 4) & 0x0f0f0f0f;
  5213. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  5214. summs += dmin * smin;
  5215. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  5216. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  5217. const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  5218. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  5219. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  5220. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5221. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  5222. const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
  5223. const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
  5224. const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
  5225. const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
  5226. const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
  5227. const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
  5228. const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
  5229. const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
  5230. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
  5231. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
  5232. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
  5233. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
  5234. }
  5235. *s = hsum_float_8(acc) + summs;
  5236. #elif defined __riscv_v_intrinsic
  5237. uint32_t aux32[2];
  5238. const uint8_t * scales = (const uint8_t *)aux32;
  5239. float sumf = 0;
  5240. for (int i = 0; i < nb; ++i) {
  5241. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5242. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5243. const uint8_t * restrict q2 = x[i].qs;
  5244. const int8_t * restrict q8 = y[i].qs;
  5245. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  5246. aux32[0] = sc[0] & 0x0f0f0f0f;
  5247. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  5248. sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  5249. int isum1 = 0;
  5250. int isum2 = 0;
  5251. size_t vl = 16;
  5252. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  5253. // load Q2
  5254. vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
  5255. vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
  5256. vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
  5257. vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
  5258. vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
  5259. // load Q8, and take product with Q2
  5260. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  5261. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  5262. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  5263. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  5264. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
  5265. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
  5266. vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
  5267. vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
  5268. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
  5269. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
  5270. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
  5271. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
  5272. sumf += d * (isum1 + isum2);
  5273. }
  5274. *s = sumf;
  5275. #else
  5276. float sumf = 0;
  5277. int isum[QK_K/16];
  5278. for (int i = 0; i < nb; ++i) {
  5279. const uint8_t * q2 = x[i].qs;
  5280. const int8_t * q8 = y[i].qs;
  5281. const uint8_t * sc = x[i].scales;
  5282. int summs = 0;
  5283. for (int j = 0; j < QK_K/16; ++j) {
  5284. summs += y[i].bsums[j] * (sc[j] >> 4);
  5285. }
  5286. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5287. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5288. memset(isum, 0, (QK_K/16)*sizeof(int));
  5289. for (int l = 0; l < 16; ++l) {
  5290. isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
  5291. isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
  5292. isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
  5293. isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
  5294. }
  5295. for (int l = 0; l < QK_K/16; ++l) {
  5296. isum[l] *= (sc[l] & 0xF);
  5297. }
  5298. sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
  5299. }
  5300. *s = sumf;
  5301. #endif
  5302. }
  5303. #endif
  5304. #if QK_K == 256
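
// Dot product of a Q3_K row with a Q8_K row (QK_K == 256). The 3-bit quants are stored as 2 low bits
// in qs plus 1 high bit in hmask; 4 is subtracted from a value when its high bit is clear. The 16
// 6-bit sub-block scales are packed into 12 bytes and used with an offset of -32 (see the scalar
// fallback at the end of this function).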
void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const uint32_t kmask1 = 0x03030303;
    const uint32_t kmask2 = 0x0f0f0f0f;

    const block_q3_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

  5317. #ifdef __ARM_NEON
  5318. uint32_t aux[3];
  5319. uint32_t utmp[4];
  5320. const uint8x16_t m3b = vdupq_n_u8(0x3);
  5321. const int32x4_t vzero = vdupq_n_s32(0);
  5322. const uint8x16_t m0 = vdupq_n_u8(1);
  5323. const uint8x16_t m1 = vshlq_n_u8(m0, 1);
  5324. const uint8x16_t m2 = vshlq_n_u8(m0, 2);
  5325. const uint8x16_t m3 = vshlq_n_u8(m0, 3);
  5326. const int8_t m32 = 32;
  5327. ggml_int8x16x4_t q3bytes;
  5328. float sum = 0;
  5329. for (int i = 0; i < nb; ++i) {
  5330. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5331. const uint8_t * restrict q3 = x[i].qs;
  5332. const uint8_t * restrict qh = x[i].hmask;
  5333. const int8_t * restrict q8 = y[i].qs;
  5334. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  5335. ggml_uint8x16x4_t q3h;
  5336. int32_t isum = 0;
  5337. // Set up scales
  5338. memcpy(aux, x[i].scales, 12);
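        // unpack the 16 6-bit scales: the low 4 bits of each come from aux[0]/aux[1], the top 2 bits from aux[2]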
  5339. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  5340. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  5341. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  5342. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  5343. int8_t * scale = (int8_t *)utmp;
  5344. for (int j = 0; j < 16; ++j) scale[j] -= m32;
  5345. for (int j = 0; j < QK_K/128; ++j) {
  5346. const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
  5347. const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
  5348. const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
  5349. q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
  5350. q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
  5351. q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
  5352. q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
  5353. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  5354. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  5355. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  5356. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  5357. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
  5358. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
  5359. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
  5360. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
  5361. scale += 4;
  5362. q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
  5363. q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
  5364. q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
  5365. q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
  5366. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  5367. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  5368. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  5369. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  5370. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
  5371. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
  5372. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
  5373. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
  5374. scale += 4;
  5375. if (j == 0) {
  5376. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
  5377. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
  5378. }
  5379. }
  5380. sum += d * isum;
  5381. }
  5382. *s = sum;
  5383. #elif defined __AVX2__
  5384. const __m256i m3 = _mm256_set1_epi8(3);
  5385. const __m256i mone = _mm256_set1_epi8(1);
  5386. const __m128i m32 = _mm_set1_epi8(32);
  5387. __m256 acc = _mm256_setzero_ps();
  5388. uint32_t aux[3];
  5389. for (int i = 0; i < nb; ++i) {
  5390. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5391. const uint8_t * restrict q3 = x[i].qs;
  5392. const int8_t * restrict q8 = y[i].qs;
  5393. // Set up scales
  5394. memcpy(aux, x[i].scales, 12);
  5395. __m128i scales128 = _mm_set_epi32(
  5396. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  5397. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  5398. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  5399. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  5400. scales128 = _mm_sub_epi8(scales128, m32);
  5401. const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
  5402. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  5403. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  5404. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
  5405. // high bit
  5406. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
  5407. // integer accumulator
  5408. __m256i sumi = _mm256_setzero_si256();
  5409. int bit = 0;
  5410. int is = 0;
  5411. for (int j = 0; j < QK_K/128; ++j) {
  5412. // load low 2 bits
  5413. const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
  5414. // prepare low and high bits
  5415. const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
  5416. const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  5417. ++bit;
  5418. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
  5419. const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  5420. ++bit;
  5421. const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
  5422. const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  5423. ++bit;
  5424. const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
  5425. const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  5426. ++bit;
  5427. // load Q8 quants
  5428. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5429. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5430. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5431. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            // Dot product: we multiply the 2 low bits and the high-bit part separately, so we can use _mm256_maddubs_epi16,
            // and then subtract. The high-bit part carries the 4 that has to be subtracted: it is 4 if the high bit
            // was not set, and 0 if it was set.
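            // e.g. low bits = 1 with the high bit clear: q8*1 - q8*4 = -3*q8, i.e. the quant value is -3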
  5435. __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  5436. __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  5437. __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
  5438. __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
  5439. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  5440. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  5441. __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
  5442. __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
  5443. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  5444. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  5445. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  5446. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
  5447. // multiply with scales
  5448. p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
  5449. p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
  5450. p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
  5451. p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
  5452. // accumulate
  5453. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  5454. p16_2 = _mm256_add_epi32(p16_2, p16_3);
  5455. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
  5456. }
  5457. // multiply with block scale and accumulate
  5458. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  5459. }
  5460. *s = hsum_float_8(acc);
  5461. #elif defined __AVX__
  5462. const __m128i m3 = _mm_set1_epi8(3);
  5463. const __m128i mone = _mm_set1_epi8(1);
  5464. const __m128i m32 = _mm_set1_epi8(32);
  5465. const __m128i m2 = _mm_set1_epi8(2);
  5466. __m256 acc = _mm256_setzero_ps();
  5467. const uint32_t *aux;
  5468. for (int i = 0; i < nb; ++i) {
  5469. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5470. const uint8_t * restrict q3 = x[i].qs;
  5471. const int8_t * restrict q8 = y[i].qs;
  5472. // Set up scales
  5473. aux = (const uint32_t *)x[i].scales;
  5474. __m128i scales128 = _mm_set_epi32(
  5475. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  5476. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  5477. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  5478. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  5479. scales128 = _mm_sub_epi8(scales128, m32);
  5480. const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
  5481. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
  5482. const __m128i scales[2] = { scales_0, scales_1 };
  5483. // high bit *128*2 from block_q3_K.hmask[QK_K/8]
  5484. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
  5485. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
  5486. // integer accumulator
  5487. __m128i sumi_0 = _mm_setzero_si128();
  5488. __m128i sumi_1 = _mm_setzero_si128();
  5489. for (int j = 0; j < QK_K/128; ++j) {
  5490. // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
  5491. const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  5492. const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  5493. // prepare low and high bits
  5494. const int bit = j << 2;
  5495. const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
  5496. const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
  5497. const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
  5498. const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
  5499. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
  5500. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
  5501. const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  5502. const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  5503. const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
  5504. const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
  5505. const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  5506. const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  5507. const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
  5508. const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
  5509. const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  5510. const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  5511. // load Q8 quants from block_q8_K.qs[QK_K]
  5512. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5513. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5514. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5515. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5516. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5517. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5518. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5519. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            // Dot product: we multiply the 2 low bits and the high-bit part separately, so we can use _mm_maddubs_epi16,
            // and then subtract. The high-bit part carries the 4 that has to be subtracted: it is 4 if the high bit
            // was not set, and 0 if it was set.
  5523. __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
  5524. __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
  5525. __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
  5526. __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
  5527. __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
  5528. __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
  5529. __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
  5530. __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
  5531. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
  5532. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
  5533. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
  5534. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
  5535. __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
  5536. __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
  5537. __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
  5538. __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
  5539. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  5540. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  5541. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  5542. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  5543. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  5544. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  5545. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  5546. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  5547. // multiply with scales
  5548. __m128i shuffle = _mm_set1_epi16(0x0100);
  5549. p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
  5550. shuffle = _mm_add_epi16(shuffle, m2);
  5551. p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
  5552. shuffle = _mm_add_epi16(shuffle, m2);
  5553. p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
  5554. shuffle = _mm_add_epi16(shuffle, m2);
  5555. p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
  5556. shuffle = _mm_add_epi16(shuffle, m2);
  5557. p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
  5558. shuffle = _mm_add_epi16(shuffle, m2);
  5559. p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
  5560. shuffle = _mm_add_epi16(shuffle, m2);
  5561. p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
  5562. shuffle = _mm_add_epi16(shuffle, m2);
  5563. p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
  5564. // accumulate
  5565. p16_0 = _mm_add_epi32(p16_0, p16_1);
  5566. p16_2 = _mm_add_epi32(p16_2, p16_3);
  5567. p16_4 = _mm_add_epi32(p16_4, p16_5);
  5568. p16_6 = _mm_add_epi32(p16_6, p16_7);
  5569. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  5570. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
  5571. }
  5572. // multiply with block scale and accumulate
  5573. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  5574. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  5575. }
  5576. *s = hsum_float_8(acc);
  5577. #elif defined __riscv_v_intrinsic
  5578. uint32_t aux[3];
  5579. uint32_t utmp[4];
  5580. float sumf = 0;
  5581. for (int i = 0; i < nb; ++i) {
  5582. const uint8_t * restrict q3 = x[i].qs;
  5583. const uint8_t * restrict qh = x[i].hmask;
  5584. const int8_t * restrict q8 = y[i].qs;
  5585. memcpy(aux, x[i].scales, 12);
  5586. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  5587. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  5588. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  5589. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  5590. int8_t * scale = (int8_t *)utmp;
  5591. for (int j = 0; j < 16; ++j) scale[j] -= 32;
  5592. size_t vl = 32;
  5593. uint8_t m = 1;
  5594. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5595. vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
  5596. int sum_t = 0;
  5597. for (int j = 0; j < QK_K; j += 128) {
  5598. vl = 32;
  5599. // load Q3
  5600. vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
  5601. vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
  5602. vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
  5603. vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
  5604. vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
  5605. // compute mask for subtraction
  5606. vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5607. vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
  5608. vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
  5609. m <<= 1;
  5610. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5611. vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
  5612. vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
  5613. m <<= 1;
  5614. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5615. vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
  5616. vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
  5617. m <<= 1;
  5618. vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5619. vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
  5620. vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
  5621. m <<= 1;
  5622. // load Q8 and take product with Q3
  5623. vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
  5624. vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  5625. vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  5626. vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  5627. vl = 16;
  5628. // retrieve lane to multiply with scale
  5629. vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
  5630. vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
  5631. vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
  5632. vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
  5633. vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
  5634. vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
  5635. vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
  5636. vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
  5637. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
  5638. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
  5639. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
  5640. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
  5641. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
  5642. q3 += 32; q8 += 128; scale += 8;
  5643. }
  5644. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5645. sumf += d*sum_t;
  5646. }
  5647. *s = sumf;
  5648. #else
  5649. // scalar version
  5650. // This function is written like this so the compiler can manage to vectorize most of it
  5651. // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
  5652. // manually vectorized version above. Every other version I tried would run at least 4 times slower.
  5653. // The ideal situation would be if we could just write the code once, and the compiler would
  5654. // automatically produce the best possible set of machine instructions, instead of us having to manually
  5655. // write vectorized versions for AVX, ARM_NEON, etc.
  5656. int8_t aux8[QK_K];
  5657. int16_t aux16[8];
  5658. float sums [8];
  5659. int32_t aux32[8];
  5660. memset(sums, 0, 8*sizeof(float));
  5661. uint32_t auxs[4];
  5662. const int8_t * scales = (const int8_t*)auxs;
  5663. float sumf = 0;
  5664. for (int i = 0; i < nb; ++i) {
  5665. const uint8_t * restrict q3 = x[i].qs;
  5666. const uint8_t * restrict hm = x[i].hmask;
  5667. const int8_t * restrict q8 = y[i].qs;
  5668. memset(aux32, 0, 8*sizeof(int32_t));
  5669. int8_t * restrict a = aux8;
  5670. uint8_t m = 1;
  5671. for (int j = 0; j < QK_K; j += 128) {
  5672. for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
  5673. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5674. a += 32; m <<= 1;
  5675. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
  5676. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5677. a += 32; m <<= 1;
  5678. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
  5679. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5680. a += 32; m <<= 1;
  5681. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
  5682. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5683. a += 32; m <<= 1;
  5684. q3 += 32;
  5685. }
  5686. a = aux8;
  5687. memcpy(auxs, x[i].scales, 12);
  5688. uint32_t tmp = auxs[2];
  5689. auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  5690. auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  5691. auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  5692. auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  5693. for (int j = 0; j < QK_K/16; ++j) {
  5694. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5695. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  5696. q8 += 8; a += 8;
  5697. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5698. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  5699. q8 += 8; a += 8;
  5700. }
  5701. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5702. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5703. }
  5704. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5705. *s = sumf;
  5706. #endif
  5707. }
  5708. #else
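// QK_K != 256: the same dot product for the smaller super-block size (the QK_K == 64 build).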
  5709. void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  5710. assert(n % QK_K == 0);
  5711. assert(nrc == 1);
  5712. UNUSED(nrc);
  5713. UNUSED(bx);
  5714. UNUSED(by);
  5715. UNUSED(bs);
  5716. const block_q3_K * restrict x = vx;
  5717. const block_q8_K * restrict y = vy;
  5718. const int nb = n / QK_K;
  5719. #ifdef __ARM_NEON
  5720. const int32x4_t vzero = vdupq_n_s32(0);
  5721. const uint8x16_t m3b = vdupq_n_u8(0x3);
  5722. const uint8x16_t mh = vdupq_n_u8(4);
  5723. ggml_int8x16x4_t q3bytes;
  5724. uint16_t aux16[2];
  5725. int8_t * scales = (int8_t *)aux16;
  5726. float sum = 0;
  5727. for (int i = 0; i < nb; ++i) {
  5728. ggml_uint8x16x4_t q3h;
  5729. const uint8x8_t hbits = vld1_u8(x[i].hmask);
  5730. const uint8x16_t q3bits = vld1q_u8(x[i].qs);
  5731. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);
  5732. const uint16_t a = *(const uint16_t *)x[i].scales;
  5733. aux16[0] = a & 0x0f0f;
  5734. aux16[1] = (a >> 4) & 0x0f0f;
  5735. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  5736. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  5737. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5738. const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
  5739. q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
  5740. q3h.val[1] = vandq_u8(mh, htmp);
  5741. q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
  5742. q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
  5743. q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
  5744. q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
  5745. q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
  5746. q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
  5747. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
  5748. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
  5749. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
  5750. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
  5751. sum += d * isum;
  5752. }
  5753. *s = sum;
  5754. #elif defined __AVX2__
  5755. const __m256i m3 = _mm256_set1_epi8(3);
  5756. const __m256i m1 = _mm256_set1_epi8(1);
  5757. __m256 acc = _mm256_setzero_ps();
  5758. uint64_t aux64;
  5759. uint16_t aux16[2];
  5760. const int8_t * aux8 = (const int8_t *)aux16;
  5761. for (int i = 0; i < nb; ++i) {
  5762. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5763. const uint8_t * restrict q3 = x[i].qs;
  5764. const int8_t * restrict q8 = y[i].qs;
  5765. const uint16_t a = *(const uint16_t *)x[i].scales;
  5766. aux16[0] = a & 0x0f0f;
  5767. aux16[1] = (a >> 4) & 0x0f0f;
  5768. const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
  5769. const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
  5770. memcpy(&aux64, x[i].hmask, 8);
  5771. const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  5772. __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
  5773. __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
  5774. q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
  5775. q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
  5776. // load low 2 bits
  5777. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  5778. // prepare low and high bits
  5779. const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
  5780. const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
  5781. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
  5782. // load Q8 quants
  5783. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5784. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
5785. // Dot product: we multiply the 2 low bits and the high-bit part separately, so we can use _mm256_maddubs_epi16,
5786. // and then subtract. The high-bit part already carries the offset: it is 4 if the high bit was not set
5787. // and 0 if it was set, so the subtraction applies the implicit -4 of the q3_K encoding.
  5788. const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  5789. const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  5790. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  5791. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  5792. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  5793. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  5794. // multiply with scales
  5795. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  5796. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  5797. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  5798. // multiply with block scale and accumulate
  5799. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
  5800. }
  5801. *s = hsum_float_8(acc);
  5802. #elif defined __AVX__
  5803. const __m128i m3 = _mm_set1_epi8(3);
  5804. const __m128i m1 = _mm_set1_epi8(1);
  5805. __m256 acc = _mm256_setzero_ps();
  5806. uint64_t aux64;
  5807. uint16_t aux16[2];
  5808. const int8_t * aux8 = (const int8_t *)aux16;
  5809. for (int i = 0; i < nb; ++i) {
  5810. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5811. const uint8_t * restrict q3 = x[i].qs;
  5812. const int8_t * restrict q8 = y[i].qs;
  5813. const uint16_t a = *(const uint16_t *)x[i].scales;
  5814. aux16[0] = a & 0x0f0f;
  5815. aux16[1] = (a >> 4) & 0x0f0f;
  5816. const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
  5817. const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
  5818. const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
  5819. const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
  5820. memcpy(&aux64, x[i].hmask, 8);
  5821. __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  5822. __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
  5823. __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
  5824. __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
  5825. q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
  5826. q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
  5827. q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
  5828. q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
  5829. // load low 2 bits
  5830. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  5831. // prepare low and high bits
  5832. const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
  5833. const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
  5834. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
  5835. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
  5836. // load Q8 quants
  5837. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5838. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
5839. // Dot product: we multiply the 2 low bits and the high-bit part separately, so we can use _mm_maddubs_epi16,
5840. // and then subtract. The high-bit part already carries the offset: it is 4 if the high bit was not set
5841. // and 0 if it was set, so the subtraction applies the implicit -4 of the q3_K encoding.
  5842. const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
  5843. const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
  5844. const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
  5845. const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
  5846. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
  5847. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
  5848. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
  5849. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
  5850. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  5851. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  5852. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  5853. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  5854. // multiply with scales
  5855. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  5856. p16_1 = _mm_madd_epi16(scale_1, p16_1);
  5857. p16_2 = _mm_madd_epi16(scale_2, p16_2);
  5858. p16_3 = _mm_madd_epi16(scale_3, p16_3);
  5859. p16_0 = _mm_add_epi32(p16_0, p16_2);
  5860. p16_1 = _mm_add_epi32(p16_1, p16_3);
  5861. __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
  5862. // multiply with block scale and accumulate
  5863. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
  5864. }
  5865. *s = hsum_float_8(acc);
  5866. #elif defined __riscv_v_intrinsic
  5867. uint16_t aux16[2];
  5868. int8_t * scales = (int8_t *)aux16;
  5869. float sumf = 0;
  5870. for (int i = 0; i < nb; ++i) {
  5871. const uint8_t * restrict q3 = x[i].qs;
  5872. const int8_t * restrict q8 = y[i].qs;
  5873. const uint16_t a = *(const uint16_t *)x[i].scales;
  5874. aux16[0] = a & 0x0f0f;
  5875. aux16[1] = (a >> 4) & 0x0f0f;
  5876. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  5877. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  5878. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5879. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5880. // load qh
  5881. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
  5882. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  5883. size_t vl = 16;
  5884. // extend and combine both qh_x1 and qh_x2
  5885. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
  5886. vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5887. vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
  5888. vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5889. vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
  5890. // load Q3
  5891. vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
  5892. vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
  5893. vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
  5894. vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
  5895. vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
  5896. vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
  5897. vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
  5898. vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
  5899. vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
  5900. // load Q8 and take product with Q3
  5901. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  5902. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  5903. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  5904. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  5905. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  5906. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  5907. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  5908. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  5909. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
  5910. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
  5911. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
  5912. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
  5913. sumf += d * isum;
  5914. }
  5915. *s = sumf;
  5916. #else
  5917. int8_t aux8[QK_K];
  5918. int16_t aux16[8];
  5919. float sums [8];
  5920. int32_t aux32[8];
  5921. int32_t scales[4];
  5922. memset(sums, 0, 8*sizeof(float));
  5923. float sumf = 0;
  5924. for (int i = 0; i < nb; ++i) {
  5925. const uint8_t * restrict q3 = x[i].qs;
  5926. const uint8_t * restrict hm = x[i].hmask;
  5927. const int8_t * restrict q8 = y[i].qs;
  5928. int8_t * restrict a = aux8;
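// Reference path: each quant decodes to ((q3 >> shift) & 3) - (high bit set ? 0 : 4), and the
// per-16-element scales are 4-bit values stored with a bias of 8.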
  5929. for (int l = 0; l < 8; ++l) {
  5930. a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
  5931. a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
  5932. a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
  5933. a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
  5934. a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
  5935. a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
  5936. a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
  5937. a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
  5938. }
  5939. scales[0] = (x[i].scales[0] & 0xF) - 8;
  5940. scales[1] = (x[i].scales[0] >> 4) - 8;
  5941. scales[2] = (x[i].scales[1] & 0xF) - 8;
  5942. scales[3] = (x[i].scales[1] >> 4) - 8;
  5943. memset(aux32, 0, 8*sizeof(int32_t));
  5944. for (int j = 0; j < QK_K/16; ++j) {
  5945. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5946. q8 += 8; a += 8;
  5947. for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
  5948. q8 += 8; a += 8;
  5949. for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
  5950. }
  5951. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5952. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5953. }
  5954. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5955. *s = sumf;
  5956. #endif
  5957. }
  5958. #endif
  5959. #if QK_K == 256
  5960. void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  5961. assert(n % QK_K == 0);
  5962. assert(nrc == 1);
  5963. UNUSED(nrc);
  5964. UNUSED(bx);
  5965. UNUSED(by);
  5966. UNUSED(bs);
  5967. const block_q4_K * restrict x = vx;
  5968. const block_q8_K * restrict y = vy;
  5969. const int nb = n / QK_K;
  5970. static const uint32_t kmask1 = 0x3f3f3f3f;
  5971. static const uint32_t kmask2 = 0x0f0f0f0f;
  5972. static const uint32_t kmask3 = 0x03030303;
  5973. uint32_t utmp[4];
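// For QK_K == 256 the 12 bytes of x[i].scales pack 8 scales and 8 mins, 6 bits each; the kmask
// constants are used below to unpack them into utmp so that bytes 0-7 of utmp are the scales
// and bytes 8-15 the mins.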
  5974. #ifdef __ARM_NEON
  5975. const uint8x16_t m4b = vdupq_n_u8(0xf);
  5976. const int32x4_t mzero = vdupq_n_s32(0);
  5977. ggml_int8x16x2_t q4bytes;
  5978. ggml_int8x16x2_t q8bytes;
  5979. float sumf = 0;
  5980. for (int i = 0; i < nb; ++i) {
  5981. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5982. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5983. const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
  5984. memcpy(utmp, x[i].scales, 12);
  5985. uint32x2_t mins8 = { 0 };
  5986. mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
  5987. mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
  5988. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5989. utmp[0] &= kmask1;
  5990. const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
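// Multiply the 8 sub-block mins with the corresponding q8 sub-block sums and subtract
// dmin * total up front, so the main loop only has to handle the unsigned 4-bit quants.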
  5991. const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
  5992. vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
  5993. sumf -= dmin * vaddvq_s32(prod);
  5994. const uint8_t * scales = (const uint8_t *)utmp;
  5995. const uint8_t * restrict q4 = x[i].qs;
  5996. const int8_t * restrict q8 = y[i].qs;
  5997. int32_t sumi1 = 0;
  5998. int32_t sumi2 = 0;
  5999. for (int j = 0; j < QK_K/64; ++j) {
  6000. const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
  6001. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  6002. q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
  6003. q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
  6004. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  6005. sumi1 += vaddvq_s32(p1) * scales[2*j+0];
  6006. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  6007. q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
  6008. q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
  6009. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  6010. sumi2 += vaddvq_s32(p2) * scales[2*j+1];
  6011. }
  6012. sumf += d * (sumi1 + sumi2);
  6013. }
  6014. *s = sumf;
  6015. #elif defined __AVX2__
  6016. const __m256i m4 = _mm256_set1_epi8(0xF);
  6017. __m256 acc = _mm256_setzero_ps();
  6018. __m128 acc_m = _mm_setzero_ps();
  6019. for (int i = 0; i < nb; ++i) {
  6020. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6021. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6022. memcpy(utmp, x[i].scales, 12);
  6023. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6024. const uint32_t uaux = utmp[1] & kmask1;
  6025. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6026. utmp[2] = uaux;
  6027. utmp[0] &= kmask1;
  6028. const uint8_t * restrict q4 = x[i].qs;
  6029. const int8_t * restrict q8 = y[i].qs;
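// utmp now holds the 8 scales (low 8 bytes) and 8 mins (high 8 bytes); widen them to 16 bit and
// fold the mins into acc_m via the q8 block sums (dmin is already negated above).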
  6030. const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
  6031. const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
  6032. const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
  6033. const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
  6034. acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
  6035. const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
  6036. const __m256i scales = MM256_SET_M128I(sc128, sc128);
  6037. __m256i sumi = _mm256_setzero_si256();
  6038. for (int j = 0; j < QK_K/64; ++j) {
  6039. const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
  6040. const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
  6041. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  6042. const __m256i q4l = _mm256_and_si256(q4bits, m4);
  6043. const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
  6044. const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6045. __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
  6046. p16l = _mm256_madd_epi16(scale_l, p16l);
  6047. const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6048. __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
  6049. p16h = _mm256_madd_epi16(scale_h, p16h);
  6050. const __m256i sumj = _mm256_add_epi32(p16l, p16h);
  6051. sumi = _mm256_add_epi32(sumi, sumj);
  6052. }
  6053. __m256 vd = _mm256_set1_ps(d);
  6054. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
  6055. }
  6056. acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
  6057. acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
  6058. *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
  6059. #elif defined __AVX__
  6060. const __m128i m4 = _mm_set1_epi8(0xF);
  6061. const __m128i m2 = _mm_set1_epi8(0x2);
  6062. __m256 acc = _mm256_setzero_ps();
  6063. __m128 acc_m = _mm_setzero_ps();
  6064. for (int i = 0; i < nb; ++i) {
  6065. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6066. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6067. const uint8_t * restrict q4 = x[i].qs;
  6068. const int8_t * restrict q8 = y[i].qs;
  6069. memcpy(utmp, x[i].scales, 12);
  6070. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6071. const uint32_t uaux = utmp[1] & kmask1;
  6072. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6073. utmp[2] = uaux;
  6074. utmp[0] &= kmask1;
  6075. const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
  6076. const __m128i scales = _mm_cvtepu8_epi16(utmps);
  6077. const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
  6078. const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
  6079. const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
  6080. const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
  6081. const __m128i prod = _mm_madd_epi16(mins, q8s);
  6082. acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
  6083. __m128i sumi_0 = _mm_setzero_si128();
  6084. __m128i sumi_1 = _mm_setzero_si128();
  6085. __m128i shuffle = _mm_set1_epi16(0x0100);
  6086. for (int j = 0; j < QK_K/64; ++j) {
  6087. const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
  6088. shuffle = _mm_add_epi16(shuffle, m2);
  6089. const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
  6090. shuffle = _mm_add_epi16(shuffle, m2);
  6091. __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  6092. const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
  6093. const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
  6094. q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  6095. const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
  6096. const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
  6097. const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6098. __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
  6099. p16l = _mm_madd_epi16(scale_l, p16l);
  6100. sumi_0 = _mm_add_epi32(sumi_0, p16l);
  6101. const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6102. p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
  6103. p16l = _mm_madd_epi16(scale_l, p16l);
  6104. sumi_1 = _mm_add_epi32(sumi_1, p16l);
  6105. const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6106. __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
  6107. p16h = _mm_madd_epi16(scale_h, p16h);
  6108. sumi_0 = _mm_add_epi32(sumi_0, p16h);
  6109. const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6110. p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
  6111. p16h = _mm_madd_epi16(scale_h, p16h);
  6112. sumi_1 = _mm_add_epi32(sumi_1, p16h);
  6113. }
  6114. __m256 vd = _mm256_set1_ps(d);
  6115. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  6116. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
  6117. }
  6118. acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
  6119. acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
  6120. *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
  6121. #elif defined __riscv_v_intrinsic
  6122. const uint8_t * scales = (const uint8_t*)&utmp[0];
  6123. const uint8_t * mins = (const uint8_t*)&utmp[2];
  6124. float sumf = 0;
  6125. for (int i = 0; i < nb; ++i) {
  6126. size_t vl = 8;
  6127. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6128. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6129. vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
  6130. vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
  6131. vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
  6132. memcpy(utmp, x[i].scales, 12);
  6133. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6134. const uint32_t uaux = utmp[1] & kmask1;
  6135. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6136. utmp[2] = uaux;
  6137. utmp[0] &= kmask1;
  6138. vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
  6139. vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
  6140. vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
  6141. vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  6142. sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
  6143. const uint8_t * restrict q4 = x[i].qs;
  6144. const int8_t * restrict q8 = y[i].qs;
  6145. vl = 32;
  6146. int32_t sum_1 = 0;
  6147. int32_t sum_2 = 0;
  6148. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  6149. for (int j = 0; j < QK_K/64; ++j) {
  6150. // load Q4
  6151. vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
  6152. // load Q8 and multiply it with lower Q4 nibble
  6153. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  6154. vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
  6155. vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
  6156. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
  6157. sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
  6158. // load Q8 and multiply it with upper Q4 nibble
  6159. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  6160. vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
  6161. vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
  6162. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
  6163. sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
  6164. q4 += 32; q8 += 64;
  6165. }
  6166. sumf += d*(sum_1 + sum_2);
  6167. }
  6168. *s = sumf;
  6169. #else
  6170. const uint8_t * scales = (const uint8_t*)&utmp[0];
  6171. const uint8_t * mins = (const uint8_t*)&utmp[2];
  6172. int8_t aux8[QK_K];
  6173. int16_t aux16[8];
  6174. float sums [8];
  6175. int32_t aux32[8];
  6176. memset(sums, 0, 8*sizeof(float));
  6177. float sumf = 0;
  6178. for (int i = 0; i < nb; ++i) {
  6179. const uint8_t * restrict q4 = x[i].qs;
  6180. const int8_t * restrict q8 = y[i].qs;
  6181. memset(aux32, 0, 8*sizeof(int32_t));
  6182. int8_t * restrict a = aux8;
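// Reference path: expand all QK_K quants into aux8 (low nibbles of each group of 32, then high
// nibbles), then accumulate 8-wide products per 32-element sub-block with its scale; the mins
// are folded in afterwards via bsums and dmin.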
  6183. for (int j = 0; j < QK_K/64; ++j) {
  6184. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
  6185. a += 32;
  6186. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
  6187. a += 32; q4 += 32;
  6188. }
  6189. memcpy(utmp, x[i].scales, 12);
  6190. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6191. const uint32_t uaux = utmp[1] & kmask1;
  6192. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6193. utmp[2] = uaux;
  6194. utmp[0] &= kmask1;
  6195. int sumi = 0;
  6196. for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
  6197. a = aux8;
  6198. int is = 0;
  6199. for (int j = 0; j < QK_K/32; ++j) {
  6200. int32_t scale = scales[is++];
  6201. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6202. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6203. q8 += 8; a += 8;
  6204. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6205. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6206. q8 += 8; a += 8;
  6207. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6208. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6209. q8 += 8; a += 8;
  6210. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6211. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6212. q8 += 8; a += 8;
  6213. }
  6214. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6215. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  6216. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  6217. sumf -= dmin * sumi;
  6218. }
  6219. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6220. *s = sumf;
  6221. #endif
  6222. }
  6223. #else
  6224. void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6225. assert(n % QK_K == 0);
  6226. assert(nrc == 1);
  6227. UNUSED(nrc);
  6228. UNUSED(bx);
  6229. UNUSED(by);
  6230. UNUSED(bs);
  6231. const block_q4_K * restrict x = vx;
  6232. const block_q8_K * restrict y = vy;
  6233. const int nb = n / QK_K;
  6234. #ifdef __ARM_NEON
  6235. const uint8x16_t m4b = vdupq_n_u8(0xf);
  6236. const int32x4_t mzero = vdupq_n_s32(0);
  6237. float sumf = 0;
  6238. ggml_int8x16x2_t q4bytes;
  6239. ggml_int8x16x4_t q8bytes;
  6240. float sum_mins = 0.f;
  6241. uint16_t aux16[2];
  6242. const uint8_t * restrict scales = (const uint8_t *)aux16;
  6243. for (int i = 0; i < nb; ++i) {
  6244. const uint8_t * restrict q4 = x[i].qs;
  6245. const int8_t * restrict q8 = y[i].qs;
  6246. const uint16_t * restrict a = (const uint16_t *)x[i].scales;
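// For this block size x[i].d holds two fp16 factors: d[0] scales the quants (d below) and d[1]
// scales the mins (sum_mins). The low nibbles of x[i].scales are the two scales, the high
// nibbles the two mins.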
  6247. aux16[0] = a[0] & 0x0f0f;
  6248. aux16[1] = (a[0] >> 4) & 0x0f0f;
  6249. const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
  6250. sum_mins += y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * summi;
  6251. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  6252. const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);
  6253. q8bytes = ggml_vld1q_s8_x4(q8);
  6254. q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
  6255. q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
  6256. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  6257. const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
  6258. q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
  6259. q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
  6260. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
  6261. const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
  6262. sumf += d * (sumi1 + sumi2);
  6263. }
  6264. *s = sumf - sum_mins;
  6265. #elif defined __AVX2__
  6266. const __m256i m4 = _mm256_set1_epi8(0xF);
  6267. __m256 acc = _mm256_setzero_ps();
  6268. float summs = 0;
  6269. uint16_t aux16[2];
  6270. const uint8_t * scales = (const uint8_t *)aux16;
  6271. for (int i = 0; i < nb; ++i) {
  6272. const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
  6273. const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
  6274. const __m256 vd = _mm256_set1_ps(d);
  6275. const uint16_t * a = (const uint16_t *)x[i].scales;
  6276. aux16[0] = a[0] & 0x0f0f;
  6277. aux16[1] = (a[0] >> 4) & 0x0f0f;
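// scales[0..1] are the two sub-block scales, scales[2..3] the two mins; the mins contribution is
// computed from the q8 block sums and subtracted from the result at the end.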
  6278. summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6279. const uint8_t * restrict q4 = x[i].qs;
  6280. const int8_t * restrict q8 = y[i].qs;
  6281. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
  6282. const __m256i q4l = _mm256_and_si256(q4bits, m4);
  6283. const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
  6284. const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6285. const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
  6286. const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
  6287. const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
  6288. const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
  6289. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
  6290. const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
  6291. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
  6292. }
  6293. *s = hsum_float_8(acc) - summs;
  6294. #elif defined __AVX__
  6295. const __m128i m4 = _mm_set1_epi8(0xF);
  6296. __m256 acc = _mm256_setzero_ps();
  6297. float summs = 0;
  6298. uint16_t aux16[2];
  6299. const uint8_t * scales = (const uint8_t *)aux16;
  6300. for (int i = 0; i < nb; ++i) {
  6301. const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
  6302. const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
  6303. const __m256 vd = _mm256_set1_ps(d);
  6304. const uint16_t * a = (const uint16_t *)x[i].scales;
  6305. aux16[0] = a[0] & 0x0f0f;
  6306. aux16[1] = (a[0] >> 4) & 0x0f0f;
  6307. summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6308. const uint8_t * restrict q4 = x[i].qs;
  6309. const int8_t * restrict q8 = y[i].qs;
  6310. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
  6311. const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
  6312. const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
  6313. const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
  6314. const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
  6315. const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
  6316. const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
  6317. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6318. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6319. const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
  6320. const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
  6321. const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
  6322. const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
  6323. const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
  6324. const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
  6325. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
  6326. const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
  6327. const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
  6328. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
  6329. }
  6330. *s = hsum_float_8(acc) - summs;
  6331. #elif defined __riscv_v_intrinsic
  6332. uint16_t s16[2];
  6333. const uint8_t * restrict scales = (const uint8_t *)s16;
  6334. float sumf = 0;
  6335. for (int i = 0; i < nb; ++i) {
  6336. const uint8_t * restrict q4 = x[i].qs;
  6337. const int8_t * restrict q8 = y[i].qs;
  6338. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  6339. s16[0] = b[0] & 0x0f0f;
  6340. s16[1] = (b[0] >> 4) & 0x0f0f;
  6341. sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6342. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  6343. size_t vl = 32;
  6344. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  6345. // load Q4
  6346. vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
  6347. // load Q8 and multiply it with lower Q4 nibble
  6348. vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
  6349. vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
  6350. vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);
  6351. sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);
  6352. // load Q8 and multiply it with upper Q4 nibble
  6353. vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
  6354. vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  6355. vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);
  6356. sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
  6357. }
  6358. *s = sumf;
  6359. #else
  6360. uint8_t aux8[QK_K];
  6361. int16_t aux16[16];
  6362. float sums [8];
  6363. memset(sums, 0, 8*sizeof(float));
  6364. uint16_t s16[2];
  6365. const uint8_t * restrict scales = (const uint8_t *)s16;
  6366. float sumf = 0;
  6367. for (int i = 0; i < nb; ++i) {
  6368. const uint8_t * restrict q4 = x[i].qs;
  6369. const int8_t * restrict q8 = y[i].qs;
  6370. uint8_t * restrict a = aux8;
  6371. for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
  6372. for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
  6373. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  6374. s16[0] = b[0] & 0x0f0f;
  6375. s16[1] = (b[0] >> 4) & 0x0f0f;
  6376. sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6377. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  6378. for (int j = 0; j < QK_K/32; ++j) {
  6379. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  6380. q8 += 16; a += 16;
  6381. for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
  6382. q8 += 16; a += 16;
  6383. const float dl = d * scales[j];
  6384. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
  6385. }
  6386. }
  6387. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6388. *s = sumf;
  6389. #endif
  6390. }
  6391. #endif
  6392. #if QK_K == 256
  6393. void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6394. assert(n % QK_K == 0);
  6395. assert(nrc == 1);
  6396. UNUSED(nrc);
  6397. UNUSED(bx);
  6398. UNUSED(by);
  6399. UNUSED(bs);
  6400. const block_q5_K * restrict x = vx;
  6401. const block_q8_K * restrict y = vy;
  6402. const int nb = n / QK_K;
  6403. static const uint32_t kmask1 = 0x3f3f3f3f;
  6404. static const uint32_t kmask2 = 0x0f0f0f0f;
  6405. static const uint32_t kmask3 = 0x03030303;
  6406. uint32_t utmp[4];
  6407. #ifdef __ARM_NEON
  6408. const uint8x16_t m4b = vdupq_n_u8(0xf);
  6409. const uint8x16_t mone = vdupq_n_u8(1);
  6410. const uint8x16_t mtwo = vdupq_n_u8(2);
  6411. const int32x4_t mzero = vdupq_n_s32(0);
  6412. ggml_int8x16x4_t q5bytes;
  6413. float sumf = 0;
  6414. for (int i = 0; i < nb; ++i) {
  6415. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6416. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6417. const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
  6418. memcpy(utmp, x[i].scales, 12);
  6419. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6420. const uint32_t uaux = utmp[1] & kmask1;
  6421. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6422. utmp[2] = uaux;
  6423. utmp[0] &= kmask1;
  6424. const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
  6425. const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
  6426. const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
  6427. vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
  6428. int32_t sumi_mins = vaddvq_s32(prod);
  6429. const uint8_t * scales = (const uint8_t *)utmp;
  6430. const uint8_t * restrict q5 = x[i].qs;
  6431. const uint8_t * restrict qh = x[i].qh;
  6432. const int8_t * restrict q8 = y[i].qs;
  6433. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  6434. ggml_uint8x16x4_t q5h;
  6435. int32_t sumi = 0;
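// qh holds the 5th bit of each quant; two bits of every qh byte are consumed per iteration
// (mask 1 for the first 32 quants, mask 2 for the next 32), then qhbits is shifted right by 2.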
  6436. for (int j = 0; j < QK_K/64; ++j) {
  6437. const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
  6438. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6439. q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
  6440. q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
  6441. q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
  6442. q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
  6443. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
  6444. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
  6445. q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
  6446. q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
  6447. q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
  6448. q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
  6449. sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
  6450. sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
  6451. }
  6452. sumf += d * sumi - dmin * sumi_mins;
  6453. }
  6454. *s = sumf;
  6455. #elif defined __AVX2__
  6456. const __m256i m4 = _mm256_set1_epi8(0xF);
  6457. const __m128i mzero = _mm_setzero_si128();
  6458. const __m256i mone = _mm256_set1_epi8(1);
  6459. __m256 acc = _mm256_setzero_ps();
  6460. float summs = 0.f;
  6461. for (int i = 0; i < nb; ++i) {
  6462. const uint8_t * restrict q5 = x[i].qs;
  6463. const int8_t * restrict q8 = y[i].qs;
  6464. #if QK_K == 256
  6465. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6466. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6467. memcpy(utmp, x[i].scales, 12);
  6468. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6469. const uint32_t uaux = utmp[1] & kmask1;
  6470. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6471. utmp[2] = uaux;
  6472. utmp[0] &= kmask1;
  6473. #else
  6474. // TODO
  6475. const float d = 0, dmin = 0;
  6476. #endif
  6477. const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
  6478. const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
  6479. const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
  6480. const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
  6481. const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
  6482. summs += dmin * _mm_extract_epi32(hsum, 0);
  6483. const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
  6484. const __m256i scales = MM256_SET_M128I(sc128, sc128);
  6485. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
  6486. __m256i hmask = mone;
  6487. __m256i sumi = _mm256_setzero_si256();
  6488. int bit = 0;
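// hbits holds the high (5th) bits; hmask selects one bit position per 32-element chunk, the bit
// is shifted up to weight 16 and added to the low nibble before the dot product.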
  6489. for (int j = 0; j < QK_K/64; ++j) {
  6490. const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
  6491. const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
  6492. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
  6493. const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
  6494. const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
  6495. const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
  6496. hmask = _mm256_slli_epi16(hmask, 1);
  6497. const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
  6498. const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
  6499. const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
  6500. hmask = _mm256_slli_epi16(hmask, 1);
  6501. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6502. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6503. __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
  6504. __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
  6505. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  6506. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  6507. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  6508. }
  6509. __m256 vd = _mm256_set1_ps(d);
  6510. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
  6511. }
  6512. *s = hsum_float_8(acc) + summs;
  6513. #elif defined __AVX__
  6514. const __m128i m4 = _mm_set1_epi8(0xF);
  6515. const __m128i mzero = _mm_setzero_si128();
  6516. const __m128i mone = _mm_set1_epi8(1);
  6517. const __m128i m2 = _mm_set1_epi8(2);
  6518. __m256 acc = _mm256_setzero_ps();
  6519. float summs = 0.f;
  6520. for (int i = 0; i < nb; ++i) {
  6521. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6522. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6523. const uint8_t * restrict q5 = x[i].qs;
  6524. const int8_t * restrict q8 = y[i].qs;
  6525. memcpy(utmp, x[i].scales, 12);
  6526. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6527. const uint32_t uaux = utmp[1] & kmask1;
  6528. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6529. utmp[2] = uaux;
  6530. utmp[0] &= kmask1;
  6531. const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
  6532. const __m128i scales = _mm_cvtepu8_epi16(utmps);
  6533. const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
  6534. const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
  6535. const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
  6536. const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
  6537. const __m128i prod = _mm_madd_epi16(mins, q8s);
  6538. const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
  6539. summs += dmin * _mm_extract_epi32(hsum, 0);
  6540. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
  6541. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
  6542. __m128i hmask = mone;
  6543. __m128i sumi_0 = _mm_setzero_si128();
  6544. __m128i sumi_1 = _mm_setzero_si128();
  6545. int bit = 0;
  6546. __m128i shuffle = _mm_set1_epi16(0x0100);
  6547. for (int j = 0; j < QK_K/64; ++j) {
  6548. const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
  6549. shuffle = _mm_add_epi16(shuffle, m2);
  6550. const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
  6551. shuffle = _mm_add_epi16(shuffle, m2);
  6552. const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
  6553. const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
  6554. __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
  6555. __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
  6556. __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
  6557. __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
  6558. __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
  6559. __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
  6560. hmask = _mm_slli_epi16(hmask, 1);
  6561. __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6562. __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6563. __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
  6564. __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
  6565. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  6566. p16_1 = _mm_madd_epi16(scale_0, p16_1);
  6567. q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
  6568. q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
  6569. q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
  6570. q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
  6571. q5_0 = _mm_add_epi8(q5l_0, q5h_0);
  6572. q5_1 = _mm_add_epi8(q5l_1, q5h_1);
  6573. hmask = _mm_slli_epi16(hmask, 1);
  6574. q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6575. q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6576. __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
  6577. __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
  6578. p16_2 = _mm_madd_epi16(scale_1, p16_2);
  6579. p16_3 = _mm_madd_epi16(scale_1, p16_3);
  6580. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  6581. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  6582. }
  6583. __m256 vd = _mm256_set1_ps(d);
  6584. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  6585. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
  6586. }
  6587. *s = hsum_float_8(acc) + summs;
  6588. #elif defined __riscv_v_intrinsic
  6589. const uint8_t * scales = (const uint8_t*)&utmp[0];
  6590. const uint8_t * mins = (const uint8_t*)&utmp[2];
  6591. float sumf = 0;
  6592. float sums = 0.0;
  6593. size_t vl;
  6594. for (int i = 0; i < nb; ++i) {
  6595. vl = 8;
  6596. const uint8_t * restrict q5 = x[i].qs;
  6597. const uint8_t * restrict hm = x[i].qh;
  6598. const int8_t * restrict q8 = y[i].qs;
  6599. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6600. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  6601. vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
  6602. vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
  6603. vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
  6604. memcpy(utmp, x[i].scales, 12);
  6605. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6606. const uint32_t uaux = utmp[1] & kmask1;
  6607. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6608. utmp[2] = uaux;
  6609. utmp[0] &= kmask1;
  6610. vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
  6611. vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
  6612. vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
  6613. vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  6614. sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
  6615. vl = 32;
  6616. int32_t aux32 = 0;
  6617. int is = 0;
  6618. uint8_t m = 1;
  6619. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  6620. vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);
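// Where the qh bit for a quant is set, a masked add puts the extra 16 on top of the low nibble
// (quant = low nibble + 16 * high bit); the mins were already subtracted via dmin above.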
  6621. for (int j = 0; j < QK_K/64; ++j) {
  6622. // load Q5 and Q8
  6623. vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
  6624. vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
  6625. vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);
  6626. // compute mask for addition
  6627. vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
  6628. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  6629. vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
  6630. vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
  6631. m <<= 1;
  6632. vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
  6633. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  6634. vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
  6635. vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
  6636. m <<= 1;
  6637. vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
  6638. vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);
  6639. vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
  6640. vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);
  6641. vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
  6642. vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);
  6643. aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
  6644. q5 += 32; q8 += 64;
  6645. }
  6646. vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
  6647. sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
  6648. }
  6649. *s = sumf+sums;
  6650. #else
  6651. const uint8_t * scales = (const uint8_t*)&utmp[0];
  6652. const uint8_t * mins = (const uint8_t*)&utmp[2];
  6653. int8_t aux8[QK_K];
  6654. int16_t aux16[8];
  6655. float sums [8];
  6656. int32_t aux32[8];
  6657. memset(sums, 0, 8*sizeof(float));
  6658. float sumf = 0;
  6659. for (int i = 0; i < nb; ++i) {
  6660. const uint8_t * restrict q4 = x[i].qs;
  6661. const uint8_t * restrict hm = x[i].qh;
  6662. const int8_t * restrict q8 = y[i].qs;
  6663. memset(aux32, 0, 8*sizeof(int32_t));
  6664. int8_t * restrict a = aux8;
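// Reference path: each quant is (4-bit value) + 16 * high bit; the per-sub-block mins are
// handled separately below via dmin and the q8 block sums.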
  6665. uint8_t m = 1;
  6666. for (int j = 0; j < QK_K/64; ++j) {
  6667. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
  6668. for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
  6669. a += 32; m <<= 1;
  6670. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
  6671. for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
  6672. a += 32; m <<= 1;
  6673. q4 += 32;
  6674. }
  6675. memcpy(utmp, x[i].scales, 12);
  6676. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6677. const uint32_t uaux = utmp[1] & kmask1;
  6678. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6679. utmp[2] = uaux;
  6680. utmp[0] &= kmask1;
  6681. int sumi = 0;
  6682. for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
  6683. a = aux8;
  6684. int is = 0;
  6685. for (int j = 0; j < QK_K/32; ++j) {
  6686. int32_t scale = scales[is++];
  6687. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6688. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6689. q8 += 8; a += 8;
  6690. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6691. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6692. q8 += 8; a += 8;
  6693. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6694. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6695. q8 += 8; a += 8;
  6696. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6697. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6698. q8 += 8; a += 8;
  6699. }
  6700. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6701. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  6702. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  6703. sumf -= dmin * sumi;
  6704. }
  6705. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6706. *s = sumf;
  6707. #endif
  6708. }
  6709. #else
  6710. void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6711. assert(n % QK_K == 0);
  6712. assert(nrc == 1);
  6713. UNUSED(nrc);
  6714. UNUSED(bx);
  6715. UNUSED(by);
  6716. UNUSED(bs);
  6717. const block_q5_K * restrict x = vx;
  6718. const block_q8_K * restrict y = vy;
  6719. const int nb = n / QK_K;
  6720. #ifdef __ARM_NEON
  6721. const uint8x16_t m4b = vdupq_n_u8(0xf);
  6722. const uint8x16_t mh = vdupq_n_u8(16);
  6723. const int32x4_t mzero = vdupq_n_s32(0);
  6724. ggml_int8x16x4_t q5bytes;
  6725. ggml_uint8x16x4_t q5h;
  6726. float sumf = 0;
  6727. for (int i = 0; i < nb; ++i) {
  6728. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6729. const int8_t * sc = x[i].scales;
  6730. const uint8_t * restrict q5 = x[i].qs;
  6731. const uint8_t * restrict qh = x[i].qh;
  6732. const int8_t * restrict q8 = y[i].qs;
  6733. const uint8x8_t qhbits = vld1_u8(qh);
  6734. const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
  6735. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  6736. const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
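// htmp places qh in lanes 0-7 and qh>>1 in lanes 8-15; vbicq computes 16 & ~h, i.e. 16 where the
// high bit is clear. Subtracting it from the low nibble gives q5 = (low nibble) - 16*(1 - high bit),
// which is then weighted by the signed scales sc[].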
  6737. q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
  6738. q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
  6739. q5h.val[2] = vbicq_u8(mh, htmp);
  6740. q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
  6741. q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
  6742. q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
  6743. q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
  6744. q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
  6745. int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
  6746. int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
  6747. int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
  6748. int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
  6749. sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
  6750. }
  6751. *s = sumf;
  6752. #elif defined __AVX2__
  6753. const __m256i m4 = _mm256_set1_epi8(0xF);
  6754. const __m256i mone = _mm256_set1_epi8(1);
  6755. __m256 acc = _mm256_setzero_ps();
  6756. for (int i = 0; i < nb; ++i) {
  6757. const uint8_t * restrict q5 = x[i].qs;
  6758. const int8_t * restrict q8 = y[i].qs;
  6759. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6760. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
  6761. const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
  6762. const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
  6763. int64_t aux64;
  6764. memcpy(&aux64, x[i].qh, 8);
  6765. const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
  6766. const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
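// haux256 holds the 8 qh bytes shifted right by 0..3 bits across its four 64-bit lanes (q5h_1
// adds a further shift of 4). ANDNOT with 1 and << 4 makes q5h equal 16 where the high bit is
// clear; those products (s16_*) are subtracted below, i.e. q5 = (low nibble) - 16*(1 - high bit).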
  6767. const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
  6768. const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
  6769. const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
  6770. const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
  6771. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6772. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6773. const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
  6774. const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
  6775. const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
  6776. const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
  6777. const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
  6778. acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
  6779. }
  6780. *s = hsum_float_8(acc);
  6781. #elif defined __AVX__
  6782. const __m128i m4 = _mm_set1_epi8(0xF);
  6783. const __m128i mone = _mm_set1_epi8(1);
  6784. __m256 acc = _mm256_setzero_ps();
  6785. for (int i = 0; i < nb; ++i) {
  6786. const uint8_t * restrict q5 = x[i].qs;
  6787. const int8_t * restrict q8 = y[i].qs;
  6788. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6789. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
  6790. const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
  6791. const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
  6792. const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
  6793. const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
  6794. int64_t aux64;
  6795. memcpy(&aux64, x[i].qh, 8);
  6796. const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
  6797. const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
  6798. const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
  6799. const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
  6800. const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
  6801. const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
  6802. const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
  6803. const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
  6804. const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
  6805. const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
  6806. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6807. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6808. const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
  6809. const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
  6810. const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
  6811. const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
  6812. const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
  6813. const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
  6814. const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
  6815. const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
  6816. const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
  6817. const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
  6818. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
  6819. }
  6820. *s = hsum_float_8(acc);
  6821. #elif defined __riscv_v_intrinsic
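// RISC-V vector variant: the high bits are expanded into a per-value 16 offset (qh_h*),
// subtracted from the low nibbles, then four 16-element dot products with q8 are reduced
// and weighted by the int8 block scales.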
  6822. float sumf = 0;
  6823. for (int i = 0; i < nb; ++i) {
  6824. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6825. const int8_t * sc = x[i].scales;
  6826. const uint8_t * restrict q5 = x[i].qs;
  6827. const uint8_t * restrict qh = x[i].qh;
  6828. const int8_t * restrict q8 = y[i].qs;
  6829. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  6830. // load qh
  6831. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
  6832. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  6833. size_t vl = 16;
  6834. // combine both qh_1 and qh_2
  6835. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
  6836. vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
  6837. vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
  6838. vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
  6839. vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
  6840. vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
  6841. vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
  6842. vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
  6843. vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);
  6844. // load q5
  6845. vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
  6846. vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);
  6847. vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
  6848. vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
  6849. vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
  6850. vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));
  6851. vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
  6852. vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
  6853. vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
  6854. vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);
  6855. // load Q8 and multiply it with Q5
  6856. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  6857. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  6858. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  6859. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  6860. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  6861. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  6862. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  6863. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  6864. int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
  6865. int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
  6866. int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
  6867. int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);
  6868. sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
  6869. }
  6870. *s = sumf;
  6871. #else
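// Scalar reference implementation: unpack the low nibbles of qs, use the high bit from qh
// to select between the [0,15] and [-16,-1] ranges, then accumulate per-16 scaled products.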
  6872. int8_t aux8[QK_K];
  6873. int16_t aux16[16];
  6874. float sums [8];
  6875. memset(sums, 0, 8*sizeof(float));
  6876. float sumf = 0;
  6877. for (int i = 0; i < nb; ++i) {
  6878. const uint8_t * restrict q4 = x[i].qs;
  6879. const uint8_t * restrict hm = x[i].qh;
  6880. const int8_t * restrict q8 = y[i].qs;
  6881. int8_t * restrict a = aux8;
  6882. for (int l = 0; l < 32; ++l) {
  6883. a[l+ 0] = q4[l] & 0xF;
  6884. a[l+32] = q4[l] >> 4;
  6885. }
  6886. for (int is = 0; is < 8; ++is) {
  6887. uint8_t m = 1 << is;
  6888. for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
  6889. }
  6890. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6891. const int8_t * restrict sc = x[i].scales;
  6892. for (int j = 0; j < QK_K/16; ++j) {
  6893. const float dl = d * sc[j];
  6894. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  6895. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
  6896. q8 += 16; a += 16;
  6897. }
  6898. }
  6899. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6900. *s = sumf;
  6901. #endif
  6902. }
  6903. #endif
  6904. #if QK_K == 256
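// Dot product of a q6_K-quantized row with a q8_K-quantized row (QK_K == 256).
// Each 6-bit value is stored as a low nibble in ql and two high bits in qh, with an
// implicit -32 offset and one int8 scale per 16 values.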
  6905. void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6906. assert(n % QK_K == 0);
  6907. assert(nrc == 1);
  6908. UNUSED(nrc);
  6909. UNUSED(bx);
  6910. UNUSED(by);
  6911. UNUSED(bs);
  6912. const block_q6_K * restrict x = vx;
  6913. const block_q8_K * restrict y = vy;
  6914. const int nb = n / QK_K;
  6915. #ifdef __ARM_NEON
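// ARM NEON: the -32 offset is not subtracted per value; instead the precomputed block sums
// of q8 (bsums) are multiplied by the scales into isum_mins and subtracted once at the end.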
  6916. float sum = 0;
  6917. const uint8x16_t m4b = vdupq_n_u8(0xF);
  6918. const int32x4_t vzero = vdupq_n_s32(0);
// no m32s constant here: the -32 offset is applied once at the end through isum_mins
  6920. const uint8x16_t mone = vdupq_n_u8(3);
  6921. ggml_int8x16x4_t q6bytes;
  6922. ggml_uint8x16x4_t q6h;
  6923. for (int i = 0; i < nb; ++i) {
  6924. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  6925. const uint8_t * restrict q6 = x[i].ql;
  6926. const uint8_t * restrict qh = x[i].qh;
  6927. const int8_t * restrict q8 = y[i].qs;
  6928. const int8_t * restrict scale = x[i].scales;
  6929. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  6930. const int8x16_t scales = vld1q_s8(scale);
  6931. const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
  6932. const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
  6933. vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
  6934. vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
  6935. vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
  6936. int32_t isum_mins = vaddvq_s32(prod);
  6937. int32_t isum = 0;
  6938. for (int j = 0; j < QK_K/128; ++j) {
  6939. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
  6940. ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
  6941. ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6942. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
  6943. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
  6944. uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
  6945. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6946. shifted = vshrq_n_u8(qhbits.val[1], 2);
  6947. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
// note: no per-byte -32 subtraction; the offset is accounted for via isum_mins below
  6952. q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
  6953. q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
  6954. q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
  6955. q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
  6956. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6957. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6958. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6959. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6960. scale += 4;
  6961. q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6962. shifted = vshrq_n_u8(qhbits.val[0], 4);
  6963. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6964. shifted = vshrq_n_u8(qhbits.val[1], 4);
  6965. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6966. shifted = vshrq_n_u8(qhbits.val[0], 6);
  6967. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6968. shifted = vshrq_n_u8(qhbits.val[1], 6);
  6969. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
// again without the per-byte -32 subtraction
  6974. q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
  6975. q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
  6976. q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
  6977. q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
  6978. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6979. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6980. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6981. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6982. scale += 4;
  6983. }
// fold in the offset: 32 * isum_mins accounts for the -32 bias of the 6-bit values
sum += d_all * y[i].d * (isum - 32 * isum_mins);
  6986. }
  6987. *s = sum;
  6988. #elif defined __AVX2__
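// AVX2: multiply the unsigned 6-bit values with q8 via maddubs, then subtract the
// 32 * q8 correction terms (q8s_*) so the result equals (q - 32) * q8.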
  6989. const __m256i m4 = _mm256_set1_epi8(0xF);
  6990. const __m256i m2 = _mm256_set1_epi8(3);
  6991. const __m256i m32s = _mm256_set1_epi8(32);
  6992. __m256 acc = _mm256_setzero_ps();
  6993. for (int i = 0; i < nb; ++i) {
  6994. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6995. const uint8_t * restrict q4 = x[i].ql;
  6996. const uint8_t * restrict qh = x[i].qh;
  6997. const int8_t * restrict q8 = y[i].qs;
  6998. const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  6999. __m256i sumi = _mm256_setzero_si256();
  7000. int is = 0;
  7001. for (int j = 0; j < QK_K/128; ++j) {
  7002. const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
  7003. const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
  7004. const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
  7005. const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
  7006. is += 4;
  7007. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  7008. const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  7009. const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
  7010. const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
  7011. const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
  7012. const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
  7013. const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
  7014. const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
  7015. const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
  7016. const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
  7017. const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
  7018. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7019. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7020. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7021. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7022. __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
  7023. __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
  7024. __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
  7025. __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
  7026. __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
  7027. __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
  7028. __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
  7029. __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
  7030. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  7031. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  7032. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  7033. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
  7034. p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
  7035. p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
  7036. p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
  7037. p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
  7038. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  7039. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
  7040. }
  7041. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  7042. }
  7043. *s = hsum_float_8(acc);
  7044. #elif defined __AVX__
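// AVX (128-bit) version of the same scheme, processing each 128-value chunk as eight
// 16-byte sub-blocks with per-sub-block scales picked out of x[i].scales via pshufb.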
  7045. const __m128i m4 = _mm_set1_epi8(0xF);
  7046. const __m128i m3 = _mm_set1_epi8(3);
  7047. const __m128i m32s = _mm_set1_epi8(32);
  7048. const __m128i m2 = _mm_set1_epi8(2);
  7049. __m256 acc = _mm256_setzero_ps();
  7050. for (int i = 0; i < nb; ++i) {
  7051. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7052. const uint8_t * restrict q4 = x[i].ql;
  7053. const uint8_t * restrict qh = x[i].qh;
  7054. const int8_t * restrict q8 = y[i].qs;
  7055. const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  7056. __m128i sumi_0 = _mm_setzero_si128();
  7057. __m128i sumi_1 = _mm_setzero_si128();
  7058. __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  7059. for (int j = 0; j < QK_K/128; ++j) {
  7060. const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
  7061. const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
  7062. const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
  7063. const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
  7064. const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
  7065. const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
  7066. const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
  7067. const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
  7068. const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
  7069. const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
  7070. const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7071. const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7072. const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7073. const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7074. const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
  7075. const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
  7076. const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
  7077. const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
  7078. const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
  7079. const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
  7080. const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
  7081. const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
  7082. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7083. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7084. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7085. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7086. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7087. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7088. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7089. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7090. __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
  7091. __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
  7092. __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
  7093. __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
  7094. __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
  7095. __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
  7096. __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
  7097. __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
  7098. __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
  7099. __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
  7100. __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
  7101. __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
  7102. __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
  7103. __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
  7104. __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
  7105. __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
  7106. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  7107. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  7108. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  7109. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  7110. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  7111. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  7112. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  7113. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  7114. const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
  7115. shuffle = _mm_add_epi8(shuffle, m2);
  7116. const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
  7117. shuffle = _mm_add_epi8(shuffle, m2);
  7118. const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
  7119. shuffle = _mm_add_epi8(shuffle, m2);
  7120. const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
  7121. shuffle = _mm_add_epi8(shuffle, m2);
  7122. p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
  7123. p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
  7124. p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
  7125. p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
  7126. p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
  7127. p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
  7128. p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
  7129. p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
  7130. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  7131. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  7132. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
  7133. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
  7134. }
  7135. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  7136. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  7137. }
  7138. *s = hsum_float_8(acc);
  7139. #elif defined __riscv_v_intrinsic
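// RISC-V vector version: rebuild 32-value chunks from ql/qh, subtract 32, widen-multiply
// with q8, scale each 16-value half and reduce into sum_t.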
  7140. float sumf = 0;
  7141. for (int i = 0; i < nb; ++i) {
  7142. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7143. const uint8_t * restrict q6 = x[i].ql;
  7144. const uint8_t * restrict qh = x[i].qh;
  7145. const int8_t * restrict q8 = y[i].qs;
  7146. const int8_t * restrict scale = x[i].scales;
  7147. size_t vl;
  7148. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  7149. int sum_t = 0;
  7150. int is = 0;
  7151. for (int j = 0; j < QK_K/128; ++j) {
  7152. vl = 32;
  7153. // load qh
  7154. vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);
  7155. // load Q6
  7156. vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
  7157. vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);
  7158. vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
  7159. vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
  7160. vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
  7161. vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);
  7162. vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
  7163. vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
  7164. vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
  7165. vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);
  7166. vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
  7167. vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
  7168. vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
  7169. vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);
  7170. vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
  7171. vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
  7172. vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
  7173. vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);
  7174. // load Q8 and take product
  7175. vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
  7176. vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  7177. vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  7178. vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  7179. vl = 16;
  7180. vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
  7181. vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
  7182. vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
  7183. vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
  7184. vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
  7185. vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
  7186. vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
  7187. vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);
  7188. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
  7189. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
  7190. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
  7191. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);
  7192. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
  7193. q6 += 64; qh += 32; q8 += 128; is=8;
  7194. }
  7195. sumf += d * sum_t;
  7196. }
  7197. *s = sumf;
  7198. #else
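// Scalar reference implementation: reconstruct all QK_K values into aux8
// (low nibble | high bits << 4, minus 32), then accumulate 16-value groups with their scales.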
  7199. int8_t aux8[QK_K];
  7200. int16_t aux16[8];
  7201. float sums [8];
  7202. int32_t aux32[8];
  7203. memset(sums, 0, 8*sizeof(float));
  7204. float sumf = 0;
  7205. for (int i = 0; i < nb; ++i) {
  7206. const uint8_t * restrict q4 = x[i].ql;
  7207. const uint8_t * restrict qh = x[i].qh;
  7208. const int8_t * restrict q8 = y[i].qs;
  7209. memset(aux32, 0, 8*sizeof(int32_t));
  7210. int8_t * restrict a = aux8;
  7211. for (int j = 0; j < QK_K; j += 128) {
  7212. for (int l = 0; l < 32; ++l) {
  7213. a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  7214. a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  7215. a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  7216. a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  7217. }
  7218. a += 128;
  7219. q4 += 64;
  7220. qh += 32;
  7221. }
  7222. a = aux8;
  7223. int is = 0;
  7224. for (int j = 0; j < QK_K/16; ++j) {
  7225. int scale = x[i].scales[is++];
  7226. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7227. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7228. q8 += 8; a += 8;
  7229. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7230. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7231. q8 += 8; a += 8;
  7232. }
  7233. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7234. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  7235. }
  7236. for (int l = 0; l < 8; ++l) sumf += sums[l];
  7237. *s = sumf;
  7238. #endif
  7239. }
  7240. #else
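// Variant of ggml_vec_dot_q6_K_q8_K for QK_K == 64: a super-block holds only 64 values
// and 4 scales, so each implementation handles it in a single pass.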
  7241. void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7242. assert(n % QK_K == 0);
  7243. assert(nrc == 1);
  7244. UNUSED(nrc);
  7245. UNUSED(bx);
  7246. UNUSED(by);
  7247. UNUSED(bs);
  7248. const block_q6_K * restrict x = vx;
  7249. const block_q8_K * restrict y = vy;
  7250. const int nb = n / QK_K;
  7251. #ifdef __ARM_NEON
  7252. float sum = 0;
  7253. const uint8x16_t m4b = vdupq_n_u8(0xF);
  7254. const int8x16_t m32s = vdupq_n_s8(32);
  7255. const int32x4_t vzero = vdupq_n_s32(0);
  7256. const uint8x16_t mone = vdupq_n_u8(3);
  7257. ggml_int8x16x4_t q6bytes;
  7258. ggml_uint8x16x4_t q6h;
  7259. for (int i = 0; i < nb; ++i) {
  7260. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  7261. const uint8_t * restrict q6 = x[i].ql;
  7262. const uint8_t * restrict qh = x[i].qh;
  7263. const int8_t * restrict q8 = y[i].qs;
  7264. const int8_t * restrict scale = x[i].scales;
  7265. int32_t isum = 0;
  7266. uint8x16_t qhbits = vld1q_u8(qh);
  7267. ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
  7268. ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  7269. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
  7270. uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
  7271. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  7272. shifted = vshrq_n_u8(qhbits, 4);
  7273. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  7274. shifted = vshrq_n_u8(qhbits, 6);
  7275. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  7276. q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
  7277. q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
  7278. q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
  7279. q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
  7280. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  7281. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  7282. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  7283. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  7284. sum += isum * d_all * y[i].d;
  7285. }
  7286. *s = sum;
  7287. #elif defined __AVX2__
  7288. const __m256i m4 = _mm256_set1_epi8(0xF);
  7289. const __m256i m2 = _mm256_set1_epi8(3);
  7290. const __m256i m32s = _mm256_set1_epi8(32);
  7291. __m256 acc = _mm256_setzero_ps();
  7292. for (int i = 0; i < nb; ++i) {
  7293. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7294. const uint8_t * restrict q4 = x[i].ql;
  7295. const uint8_t * restrict qh = x[i].qh;
  7296. const int8_t * restrict q8 = y[i].qs;
  7297. const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
  7298. const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
  7299. const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
  7300. const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
  7301. __m256i sumi = _mm256_setzero_si256();
  7302. const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
  7303. const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
  7304. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
  7305. const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
  7306. const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
  7307. const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
  7308. const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
  7309. const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
  7310. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  7311. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  7312. __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
  7313. __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
  7314. __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
  7315. __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
  7316. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  7317. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  7318. p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
  7319. p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
  7320. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  7321. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  7322. }
  7323. *s = hsum_float_8(acc);
  7324. #elif defined __AVX__
  7325. const __m128i m4 = _mm_set1_epi8(0xF);
  7326. const __m128i m2 = _mm_set1_epi8(3);
  7327. const __m128i m32s = _mm_set1_epi8(32);
  7328. __m256 acc = _mm256_setzero_ps();
  7329. for (int i = 0; i < nb; ++i) {
  7330. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7331. const uint8_t * restrict q4 = x[i].ql;
  7332. const uint8_t * restrict qh = x[i].qh;
  7333. const int8_t * restrict q8 = y[i].qs;
  7334. const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
  7335. const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
  7336. const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
  7337. const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
  7338. __m128i sumi_0 = _mm_setzero_si128();
  7339. __m128i sumi_1 = _mm_setzero_si128();
  7340. const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
  7341. const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
  7342. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
  7343. const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
  7344. const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
  7345. const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
  7346. const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
  7347. const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
  7348. const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
  7349. const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
  7350. const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
  7351. const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
  7352. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  7353. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  7354. __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
  7355. __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
  7356. __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
  7357. __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
  7358. __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
  7359. __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
  7360. __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
  7361. __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
  7362. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  7363. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  7364. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  7365. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  7366. p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
  7367. p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
  7368. p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
  7369. p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
  7370. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  7371. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  7372. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
  7373. }
  7374. *s = hsum_float_8(acc);
  7375. #elif defined __riscv_v_intrinsic
  7376. float sumf = 0;
  7377. for (int i = 0; i < nb; ++i) {
  7378. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  7379. const uint8_t * restrict q6 = x[i].ql;
  7380. const uint8_t * restrict qh = x[i].qh;
  7381. const int8_t * restrict q8 = y[i].qs;
  7382. const int8_t * restrict scale = x[i].scales;
  7383. int32_t isum = 0;
  7384. size_t vl = 16;
  7385. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  7386. // load Q6
  7387. vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
  7388. vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);
  7389. // load qh
  7390. vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);
  7391. vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7392. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  7393. vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7394. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  7395. vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7396. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  7397. vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7398. vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
  7399. vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
  7400. vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
  7401. vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);
  7402. vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
  7403. vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
  7404. vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
  7405. vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);
  7406. // load Q8 and take product
  7407. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  7408. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  7409. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  7410. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  7411. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  7412. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  7413. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  7414. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  7415. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
  7416. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
  7417. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
  7418. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];
  7419. sumf += isum * d_all * y[i].d;
  7420. }
  7421. *s = sumf;
  7422. #else
  7423. int8_t aux8[QK_K];
  7424. int16_t aux16[8];
  7425. float sums [8];
  7426. int32_t aux32[8];
  7427. memset(sums, 0, 8*sizeof(float));
  7428. float sumf = 0;
  7429. for (int i = 0; i < nb; ++i) {
  7430. const uint8_t * restrict q4 = x[i].ql;
  7431. const uint8_t * restrict qh = x[i].qh;
  7432. const int8_t * restrict q8 = y[i].qs;
  7433. memset(aux32, 0, 8*sizeof(int32_t));
  7434. int8_t * restrict a = aux8;
  7435. for (int l = 0; l < 16; ++l) {
  7436. a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  7437. a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  7438. a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  7439. a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  7440. }
  7441. int is = 0;
  7442. for (int j = 0; j < QK_K/16; ++j) {
  7443. int scale = x[i].scales[is++];
  7444. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7445. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7446. q8 += 8; a += 8;
  7447. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7448. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7449. q8 += 8; a += 8;
  7450. }
  7451. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7452. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  7453. }
  7454. for (int l = 0; l < 8; ++l) sumf += sums[l];
  7455. *s = sumf;
  7456. #endif
  7457. }
  7458. #endif
  7459. #if defined (__AVX2__) || defined (__ARM_NEON)
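// Sign table shared by the IQ2 dot products: 128 entries of 8 bytes, each byte +1 or -1.
// Entry k stores the 7 sign bits of k in its first 7 bytes; the 8th byte is chosen so that
// the total number of -1s is even, which is how the 8th sign is encoded implicitly.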
  7460. static const int8_t keven_signs_q2xs[1024] = {
  7461. 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
  7462. 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
  7463. 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
  7464. 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
  7465. 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
  7466. 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
  7467. 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
  7468. 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
  7469. 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
  7470. 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
  7471. 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
  7472. 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
  7473. 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
  7474. 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
  7475. 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
  7476. 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
  7477. 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
  7478. 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
  7479. 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
  7480. 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
  7481. 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
  7482. 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
  7483. 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
  7484. 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
  7485. 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
  7486. 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
  7487. 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
  7488. 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
  7489. 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
  7490. 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
  7491. 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
  7492. 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
  7493. };
  7494. #endif
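// Dot product of an iq2_xxs-quantized row with a q8_K row. Every group of 32 values is
// described by 4 grid indices into iq2xxs_grid (8 values each), 4x7 sign bits resolved
// through the tables above, and a 4-bit scale in the top bits of the second 32-bit word.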
  7495. void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7496. assert(n % QK_K == 0);
  7497. assert(nrc == 1);
  7498. UNUSED(nrc);
  7499. UNUSED(bx);
  7500. UNUSED(by);
  7501. UNUSED(bs);
  7502. const block_iq2_xxs * restrict x = vx;
  7503. const block_q8_K * restrict y = vy;
  7504. const int nb = n / QK_K;
  7505. #if defined(__ARM_NEON)
  7506. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7507. uint32_t aux32[4];
  7508. const uint8_t * aux8 = (const uint8_t *)aux32;
  7509. ggml_int8x16x4_t q2u;
  7510. ggml_int8x16x4_t q2s;
  7511. ggml_int8x16x4_t q8b;
  7512. float sumf = 0;
  7513. for (int i = 0; i < nb; ++i) {
  7514. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7515. const uint16_t * restrict q2 = x[i].qs;
  7516. const int8_t * restrict q8 = y[i].qs;
  7517. float sumf1 = 0, sumf2 = 0;
  7518. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7519. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7520. memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
  7521. q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
  7522. q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
  7523. q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
  7524. q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
  7525. q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  7526. q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  7527. q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
  7528. q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
  7529. q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
  7530. q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
  7531. q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
  7532. q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
  7533. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
  7534. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
  7535. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
  7536. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
  7537. }
  7538. sumf += d*(sumf1 + sumf2);
  7539. }
  7540. *s = 0.25f * sumf;
  7541. #elif defined(__AVX2__)
  7542. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7543. uint32_t aux32[4];
  7544. const uint8_t * aux8 = (const uint8_t *)aux32;
  7545. __m256 accumf = _mm256_setzero_ps();
  7546. for (int i = 0; i < nb; ++i) {
  7547. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7548. const uint16_t * restrict q2 = x[i].qs;
  7549. const int8_t * restrict q8 = y[i].qs;
  7550. __m256i sumi1 = _mm256_setzero_si256();
  7551. __m256i sumi2 = _mm256_setzero_si256();
  7552. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7553. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7554. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7555. memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
  7556. const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
  7557. const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
  7558. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  7559. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  7560. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
  7561. signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
  7562. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  7563. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  7564. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7565. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7566. const uint16_t ls1 = aux32[1] >> 28;
  7567. const uint16_t ls2 = aux32[3] >> 28;
  7568. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  7569. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  7570. sumi1 = _mm256_add_epi32(sumi1, p1);
  7571. sumi2 = _mm256_add_epi32(sumi2, p2);
  7572. }
  7573. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7574. }
  7575. *s = 0.125f * hsum_float_8(accumf);
  7576. #else
  7577. uint32_t aux32[2];
  7578. const uint8_t * aux8 = (const uint8_t *)aux32;
  7579. float sumf = 0.f;
  7580. for (int i = 0; i < nb; ++i) {
  7581. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7582. const uint16_t * restrict q2 = x[i].qs;
  7583. const int8_t * restrict q8 = y[i].qs;
  7584. int32_t bsum = 0;
  7585. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  7586. memcpy(aux32, q2, 2*sizeof(uint32_t));
  7587. q2 += 4;
  7588. const uint32_t ls = 2*(aux32[1] >> 28) + 1;
  7589. int32_t sumi = 0;
  7590. for (int l = 0; l < 4; ++l) {
  7591. const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
  7592. const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
  7593. for (int j = 0; j < 8; ++j) {
  7594. sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
  7595. }
  7596. q8 += 8;
  7597. }
  7598. bsum += sumi * ls;
  7599. }
  7600. sumf += d * bsum;
  7601. }
  7602. *s = 0.125f * sumf;
  7603. #endif
  7604. }
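// Dot product of an iq2_xs-quantized row with a q8_K row. Each 16-bit element of x[i].qs
// packs a 9-bit index into iq2xs_grid plus 7 explicit sign bits; 4-bit scales are stored
// separately in x[i].scales.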
  7605. void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7606. assert(n % QK_K == 0);
  7607. assert(nrc == 1);
  7608. UNUSED(nrc);
  7609. UNUSED(bx);
  7610. UNUSED(by);
  7611. UNUSED(bs);
  7612. const block_iq2_xs * restrict x = vx;
  7613. const block_q8_K * restrict y = vy;
  7614. const int nb = n / QK_K;
  7615. #if defined(__ARM_NEON)
  7616. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7617. ggml_int8x16x4_t q2u;
  7618. ggml_int8x16x4_t q2s;
  7619. ggml_int8x16x4_t q8b;
  7620. int32x4x4_t scales32;
  7621. float sumf = 0;
  7622. for (int i = 0; i < nb; ++i) {
  7623. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7624. const uint16_t * restrict q2 = x[i].qs;
  7625. const int8_t * restrict q8 = y[i].qs;
  7626. const uint8x8_t scales8 = vld1_u8(x[i].scales);
  7627. const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
  7628. const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
  7629. uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
  7630. scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
  7631. const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
  7632. const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
  7633. scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
  7634. scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
  7635. scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
  7636. scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
  7637. int32x4_t sumi = vdupq_n_s32(0);
  7638. for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
  7639. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7640. q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
  7641. q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
  7642. q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
  7643. q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
  7644. q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
  7645. q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
  7646. q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
  7647. q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
  7648. q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
  7649. q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
  7650. q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
  7651. q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
  7652. const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
  7653. const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
  7654. const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
  7655. const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
  7656. const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
  7657. sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
  7658. q2 += 8;
  7659. }
  7660. sumf += d*vaddvq_s32(sumi);
  7661. }
  7662. *s = 0.125f * sumf;
  7663. #elif defined(__AVX2__)
  7664. const __m256i mone = _mm256_set1_epi8(1);
  7665. static const char block_sign_shuffle_mask_1[32] = {
  7666. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
  7667. 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
  7668. };
  7669. static const char block_sign_shuffle_mask_2[32] = {
  7670. 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
  7671. 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
  7672. };
  7673. static const uint8_t bit_selector_mask_bytes[32] = {
  7674. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7675. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7676. };
  7677. const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
  7678. const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
  7679. const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);
  7680. #if QK_K == 64
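// QK_K == 64: the whole super-block is just 8 q2 elements, so it is handled in one pass
// without the ib32 loop used below.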
  7681. static const uint8_t k_bit_helper[16] = {
  7682. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  7683. };
  7684. const __m128i bit_helper = _mm_loadu_si128((const __m128i*)k_bit_helper);
  7685. const __m128i m511 = _mm_set1_epi16(511);
  7686. typedef union {
  7687. __m128i vec_index;
  7688. uint16_t index[8];
  7689. } index_t;
  7690. index_t idx;
  7691. __m256 accumf = _mm256_setzero_ps();
  7692. for (int i = 0; i < nb; ++i) {
  7693. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7694. const __m128i q2_data = _mm_loadu_si128((const __m128i*)x[i].qs);
  7695. idx.vec_index = _mm_and_si128(q2_data, m511);
  7696. const __m128i partial_sign_bits = _mm_srli_epi16(q2_data, 9);
  7697. const __m128i partial_sign_bits_upper = _mm_srli_epi16(q2_data, 13);
  7698. const __m128i partial_sign_bits_for_counting = _mm_xor_si128(partial_sign_bits, partial_sign_bits_upper);
  7699. const __m128i odd_bits = _mm_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
  7700. const __m128i full_sign_bits = _mm_or_si128(partial_sign_bits, odd_bits);
  7701. const __m256i full_signs = _mm256_set_m128i(full_sign_bits, full_sign_bits);
  7702. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  7703. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)(y[i].qs+32));
  7704. const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[idx.index[3]], iq2xs_grid[idx.index[2]],
  7705. iq2xs_grid[idx.index[1]], iq2xs_grid[idx.index[0]]);
  7706. const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[idx.index[7]], iq2xs_grid[idx.index[6]],
  7707. iq2xs_grid[idx.index[5]], iq2xs_grid[idx.index[4]]);
  7708. __m256i signs;
  7709. signs = _mm256_shuffle_epi8(full_signs, block_sign_shuffle_1);
  7710. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7711. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
  7712. signs = _mm256_shuffle_epi8(full_signs, block_sign_shuffle_2);
  7713. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7714. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
  7715. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7716. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7717. const __m256i sc1 = _mm256_set_m128i(_mm_set1_epi16(2*(x[i].scales[0] >> 4)+1), _mm_set1_epi16(2*(x[i].scales[0] & 0xf)+1));
  7718. const __m256i sc2 = _mm256_set_m128i(_mm_set1_epi16(2*(x[i].scales[1] >> 4)+1), _mm_set1_epi16(2*(x[i].scales[1] & 0xf)+1));
  7719. const __m256i sum = _mm256_add_epi32(_mm256_madd_epi16(sc1, dot1), _mm256_madd_epi16(sc2, dot2));
  7720. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sum), accumf);
  7721. }
  7722. *s = 0.125f * hsum_float_8(accumf);
  7723. #else
  7724. static const uint8_t k_bit_helper[32] = {
  7725. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  7726. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  7727. };
  7728. const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
  7729. const __m256i m511 = _mm256_set1_epi16(511);
  7730. const __m128i m4 = _mm_set1_epi8(0xf);
  7731. const __m128i m1 = _mm_set1_epi8(1);
  7732. uint64_t aux64;
// somewhat hacky, but gives a significant boost in performance: the 16 grid indices are
// kept in a __m256i and read back through a scalar uint16_t pointer for the table lookups
  7734. __m256i aux_gindex;
  7735. const uint16_t * gindex = (const uint16_t *)&aux_gindex;
  7736. __m256 accumf = _mm256_setzero_ps();
  7737. for (int i = 0; i < nb; ++i) {
  7738. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7739. const uint16_t * restrict q2 = x[i].qs;
  7740. const int8_t * restrict q8 = y[i].qs;
  7741. memcpy(&aux64, x[i].scales, 8);
  7742. __m128i stmp = _mm_set1_epi64x(aux64);
  7743. stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
  7744. const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);
  7745. __m256i sumi1 = _mm256_setzero_si256();
  7746. __m256i sumi2 = _mm256_setzero_si256();
  7747. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {
  7748. const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16;
  7749. aux_gindex = _mm256_and_si256(q2_data, m511);
  7750. const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
  7751. const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
  7752. const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);
  7753. const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
  7754. const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);
  7755. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7756. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7757. const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7758. const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7759. const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
  7760. iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
  7761. const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
  7762. iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
  7763. const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
  7764. iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
  7765. const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
  7766. iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);
  7767. const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
  7768. const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
  7769. const __m256i full_signs_1 = _mm256_set_m128i(full_signs_l, full_signs_l);
  7770. const __m256i full_signs_2 = _mm256_set_m128i(full_signs_h, full_signs_h);
  7771. __m256i signs;
  7772. signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
  7773. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7774. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
  7775. signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
  7776. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7777. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
  7778. signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
  7779. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7780. const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));
  7781. signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
  7782. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7783. const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));
  7784. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7785. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7786. const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
  7787. const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);
  7788. const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
  7789. const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
  7790. const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
  7791. const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));
  7792. sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
  7793. sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
  7794. sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
  7795. sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
  7796. }
  7797. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7798. }
  7799. *s = 0.125f * hsum_float_8(accumf);
  7800. #endif
#else
    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const uint8_t * restrict sc = x[i].scales;
        const int8_t * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
            const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls2;
            q2 += 4;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}
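// Dot product of a row of IQ2_S blocks with a row of Q8_K blocks.
// Each group of 8 weights is looked up in iq2s_grid using 8 index bits from qs plus
// 2 high bits from qh; the per-value signs are stored explicitly after the indices
// (x[i].qs + QK_K/8). Block scales are 4-bit and enter as (2*scale + 1), with the
// common 1/8 factor applied once at the end (0.125f).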
void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    const block_iq2_s * restrict x = vx;
    const block_q8_K * restrict y = vy;
    const int nb = n / QK_K;
#if defined(__ARM_NEON)
  7850. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  7851. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  7852. };
  7853. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  7854. const uint8x16x2_t mask1 = vld1q_u8_x2(k_mask1);
  7855. const uint8x16_t mask2 = vld1q_u8(k_mask2);
  7856. const uint8x16_t m1 = vdupq_n_u8(1);
  7857. const int32x4_t vzero = vdupq_n_s32(0);
  7858. uint8x16x2_t vs;
  7859. ggml_int8x16x4_t q2s;
  7860. ggml_int8x16x4_t q8b;
  7861. float sumf = 0;
  7862. for (int i = 0; i < nb; ++i) {
  7863. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7864. const uint8_t * restrict qs = x[i].qs;
  7865. const uint8_t * restrict qh = x[i].qh;
  7866. const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
  7867. const int8_t * restrict q8 = y[i].qs;
  7868. int sumi1 = 0, sumi2 = 0;
  7869. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7870. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7871. q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))),
  7872. vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300)))));
  7873. q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))),
  7874. vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300)))));
  7875. q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))),
  7876. vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300)))));
  7877. q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))),
  7878. vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300)))));
  7879. qs += 8;
  7880. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | (signs[1] << 16)));
  7881. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  7882. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  7883. vs.val[0] = vceqq_u8(vs.val[0], mask2);
  7884. vs.val[1] = vceqq_u8(vs.val[1], mask2);
  7885. q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]);
  7886. q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]);
  7887. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | (signs[3] << 16)));
  7888. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  7889. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  7890. vs.val[0] = vceqq_u8(vs.val[0], mask2);
  7891. vs.val[1] = vceqq_u8(vs.val[1], mask2);
  7892. signs += 4;
  7893. q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]);
  7894. q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]);
  7895. const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]);
  7896. const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]);
  7897. const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]);
  7898. const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]);
  7899. sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf));
  7900. sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4));
  7901. sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf));
  7902. sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4));
  7903. }
  7904. sumf += d*(sumi1 + sumi2);
  7905. }
  7906. *s = 0.125f * sumf;
  7907. #elif defined(__AVX2__)
  7908. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  7909. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  7910. };
  7911. static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7912. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7913. };
  7914. const __m128i m4 = _mm_set1_epi8(0xf);
  7915. const __m128i m1 = _mm_set1_epi8(1);
  7916. const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
  7917. const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
  7918. uint64_t aux64;
  7919. __m256 accumf = _mm256_setzero_ps();
  7920. for (int i = 0; i < nb; ++i) {
  7921. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7922. const uint8_t * restrict qs = x[i].qs;
  7923. const uint8_t * restrict qh = x[i].qh;
  7924. const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
  7925. const int8_t * restrict q8 = y[i].qs;
  7926. memcpy(&aux64, x[i].scales, 8);
  7927. const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1);
  7928. const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15
  7929. __m256i sumi1 = _mm256_setzero_si256();
  7930. __m256i sumi2 = _mm256_setzero_si256();
  7931. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7932. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7933. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7934. const __m256i q2_1 = _mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)],
  7935. iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)],
  7936. iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)],
  7937. iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]);
  7938. const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)],
  7939. iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)],
  7940. iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)],
  7941. iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]);
  7942. qs += 8;
  7943. __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16));
  7944. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  7945. const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2);
  7946. const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1);
  7947. aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16));
  7948. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  7949. const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2);
  7950. const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2);
  7951. signs += 4;
  7952. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1
  7953. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3
  7954. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0)));
  7955. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1)));
  7956. sumi1 = _mm256_add_epi32(sumi1, p1);
  7957. sumi2 = _mm256_add_epi32(sumi2, p2);
  7958. }
  7959. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7960. }
  7961. *s = 0.125f * hsum_float_8(accumf);
#else
    float sumf = 0;
    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const int8_t * q8 = y[i].qs;
        const uint8_t * qs = x[i].qs;
        const uint8_t * qh = x[i].qh;
        const uint8_t * signs = qs + QK_K/8;
        int bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
            int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
            int sumi1 = 0, sumi2 = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
                for (int j = 0; j < 8; ++j) {
                    sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
                for (int j = 0; j < 8; ++j) {
                    sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += ls1 * sumi1 + ls2 * sumi2;
            qs += 4;
            signs += 4;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}
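// Dot product of a row of IQ3_XXS blocks with a row of Q8_K blocks.
// Each byte of qs selects 4 values from iq3xxs_grid; the 32-bit words stored after the
// indices (x[i].qs + QK_K/4) pack 7-bit sign selectors for the ksigns_iq2xs table plus
// a 4-bit block scale in the top bits. The scale enters as (2*ls + 1) and the common
// 1/4 factor is applied once at the end (0.25f).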
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    const block_iq3_xxs * restrict x = vx;
    const block_q8_K * restrict y = vy;
    const int nb = n / QK_K;
#if defined(__ARM_NEON)
  8009. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  8010. uint32_t aux32[2];
  8011. ggml_int8x16x4_t q3s;
  8012. ggml_int8x16x4_t q8b;
  8013. float sumf = 0;
  8014. for (int i = 0; i < nb; ++i) {
  8015. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8016. const uint8_t * restrict q3 = x[i].qs;
  8017. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  8018. const int8_t * restrict q8 = y[i].qs;
  8019. float sumf1 = 0, sumf2 = 0;
  8020. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8021. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8022. memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
  8023. const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]);
  8024. const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]);
  8025. const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]);
  8026. const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]);
  8027. q3 += 16;
  8028. q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127))));
  8029. q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
  8030. q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  8031. q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  8032. q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
  8033. q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
  8034. q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
  8035. q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
  8036. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  8037. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  8038. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
  8039. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
  8040. }
  8041. sumf += d*(sumf1 + sumf2);
  8042. }
  8043. *s = 0.5f * sumf;
  8044. #elif defined(__AVX2__)
  8045. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  8046. uint32_t aux32[2];
  8047. __m256 accumf = _mm256_setzero_ps();
  8048. for (int i = 0; i < nb; ++i) {
  8049. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8050. const uint8_t * restrict q3 = x[i].qs;
  8051. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  8052. const int8_t * restrict q8 = y[i].qs;
  8053. __m256i sumi1 = _mm256_setzero_si256();
  8054. __m256i sumi2 = _mm256_setzero_si256();
  8055. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8056. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8057. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8058. const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  8059. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  8060. q3 += 8;
  8061. const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  8062. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  8063. q3 += 8;
  8064. memcpy(aux32, gas, 8); gas += 8;
  8065. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127],
  8066. signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]);
  8067. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  8068. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  8069. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  8070. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  8071. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  8072. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  8073. const uint16_t ls1 = aux32[0] >> 28;
  8074. const uint16_t ls2 = aux32[1] >> 28;
  8075. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  8076. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  8077. sumi1 = _mm256_add_epi32(sumi1, p1);
  8078. sumi2 = _mm256_add_epi32(sumi2, p2);
  8079. }
  8080. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  8081. }
  8082. *s = 0.25f * hsum_float_8(accumf);
#else
    uint32_t aux32;
    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict gas = x[i].qs + QK_K/4;
        const int8_t * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
            const uint32_t ls = 2*(aux32 >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
                const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            q3 += 8;
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.25f * sumf;
#endif
}
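// Dot product of a row of IQ3_S blocks with a row of Q8_K blocks.
// Each byte of qs selects 4 values from iq3xs_grid, with one extra index bit taken from
// qh; signs are stored explicitly in x[i].signs (one byte per 8 values, decoded through
// kmask_iq2xs). The 4-bit block scales enter as (2*scale + 1) and the common 1/4 factor
// is applied once at the end (0.25f).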
void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    const block_iq3_s * restrict x = vx;
    const block_q8_K * restrict y = vy;
    const int nb = n / QK_K;
#if defined(__ARM_NEON)
  8125. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8126. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8127. };
  8128. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  8129. const uint8x16x2_t mask1 = vld1q_u8_x2(k_mask1);
  8130. const uint8x16_t mask2 = vld1q_u8(k_mask2);
  8131. uint8x16x2_t vs;
  8132. ggml_int8x16x4_t q3s;
  8133. ggml_int8x16x4_t q8b;
  8134. float sumf = 0;
  8135. for (int i = 0; i < nb; ++i) {
  8136. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8137. const uint8_t * restrict qs = x[i].qs;
  8138. const uint8_t * restrict qh = x[i].qh;
  8139. const uint16_t * restrict signs = (const uint16_t *)x[i].signs;
  8140. const int8_t * restrict q8 = y[i].qs;
  8141. int sumi1 = 0, sumi2 = 0;
  8142. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8143. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8144. const uint32x4_t aux32x4_0 = {iq3xs_grid[qs[ 0] | ((qh[ib32+0] << 8) & 256)], iq3xs_grid[qs[ 1] | ((qh[ib32+0] << 7) & 256)],
  8145. iq3xs_grid[qs[ 2] | ((qh[ib32+0] << 6) & 256)], iq3xs_grid[qs[ 3] | ((qh[ib32+0] << 5) & 256)]};
  8146. const uint32x4_t aux32x4_1 = {iq3xs_grid[qs[ 4] | ((qh[ib32+0] << 4) & 256)], iq3xs_grid[qs[ 5] | ((qh[ib32+0] << 3) & 256)],
  8147. iq3xs_grid[qs[ 6] | ((qh[ib32+0] << 2) & 256)], iq3xs_grid[qs[ 7] | ((qh[ib32+0] << 1) & 256)]};
  8148. const uint32x4_t aux32x4_2 = {iq3xs_grid[qs[ 8] | ((qh[ib32+1] << 8) & 256)], iq3xs_grid[qs[ 9] | ((qh[ib32+1] << 7) & 256)],
  8149. iq3xs_grid[qs[10] | ((qh[ib32+1] << 6) & 256)], iq3xs_grid[qs[11] | ((qh[ib32+1] << 5) & 256)]};
  8150. const uint32x4_t aux32x4_3 = {iq3xs_grid[qs[12] | ((qh[ib32+1] << 4) & 256)], iq3xs_grid[qs[13] | ((qh[ib32+1] << 3) & 256)],
  8151. iq3xs_grid[qs[14] | ((qh[ib32+1] << 2) & 256)], iq3xs_grid[qs[15] | ((qh[ib32+1] << 1) & 256)]};
  8152. qs += 16;
  8153. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | (signs[1] << 16)));
  8154. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  8155. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  8156. vs.val[0] = vceqq_u8(vs.val[0], mask2);
  8157. vs.val[1] = vceqq_u8(vs.val[1], mask2);
  8158. q3s.val[0] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[0], vreinterpretq_u8_u32(aux32x4_0))), vreinterpretq_s8_u8(vs.val[0]));
  8159. q3s.val[1] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[1], vreinterpretq_u8_u32(aux32x4_1))), vreinterpretq_s8_u8(vs.val[1]));
  8160. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | (signs[3] << 16)));
  8161. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  8162. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  8163. vs.val[0] = vceqq_u8(vs.val[0], mask2);
  8164. vs.val[1] = vceqq_u8(vs.val[1], mask2);
  8165. signs += 4;
  8166. q3s.val[2] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[0], vreinterpretq_u8_u32(aux32x4_2))), vreinterpretq_s8_u8(vs.val[0]));
  8167. q3s.val[3] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[1], vreinterpretq_u8_u32(aux32x4_3))), vreinterpretq_s8_u8(vs.val[1]));
  8168. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  8169. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  8170. sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32/2] & 0xf));
  8171. sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32/2] >> 4));
  8172. }
  8173. sumf += d*(sumi1 + sumi2);
  8174. }
  8175. *s = 0.25f * sumf;
  8176. #elif defined(__AVX2__)
  8177. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8178. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8179. };
  8180. static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  8181. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  8182. };
  8183. const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
  8184. const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
  8185. __m256 accumf = _mm256_setzero_ps();
  8186. for (int i = 0; i < nb; ++i) {
  8187. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8188. const uint8_t * restrict qs = x[i].qs;
  8189. const uint8_t * restrict qh = x[i].qh;
  8190. const uint16_t * restrict signs = (const uint16_t *)x[i].signs;
  8191. const int8_t * restrict q8 = y[i].qs;
  8192. __m256i sumi1 = _mm256_setzero_si256();
  8193. __m256i sumi2 = _mm256_setzero_si256();
  8194. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8195. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8196. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8197. const __m256i q2_1 = _mm256_set_epi32(iq3xs_grid[qs[7] | ((qh[ib32+0] << 1) & 256)],
  8198. iq3xs_grid[qs[6] | ((qh[ib32+0] << 2) & 256)],
  8199. iq3xs_grid[qs[5] | ((qh[ib32+0] << 3) & 256)],
  8200. iq3xs_grid[qs[4] | ((qh[ib32+0] << 4) & 256)],
  8201. iq3xs_grid[qs[3] | ((qh[ib32+0] << 5) & 256)],
  8202. iq3xs_grid[qs[2] | ((qh[ib32+0] << 6) & 256)],
  8203. iq3xs_grid[qs[1] | ((qh[ib32+0] << 7) & 256)],
  8204. iq3xs_grid[qs[0] | ((qh[ib32+0] << 8) & 256)]);
  8205. qs += 8;
  8206. const __m256i q2_2 = _mm256_set_epi32(iq3xs_grid[qs[7] | ((qh[ib32+1] << 1) & 256)],
  8207. iq3xs_grid[qs[6] | ((qh[ib32+1] << 2) & 256)],
  8208. iq3xs_grid[qs[5] | ((qh[ib32+1] << 3) & 256)],
  8209. iq3xs_grid[qs[4] | ((qh[ib32+1] << 4) & 256)],
  8210. iq3xs_grid[qs[3] | ((qh[ib32+1] << 5) & 256)],
  8211. iq3xs_grid[qs[2] | ((qh[ib32+1] << 6) & 256)],
  8212. iq3xs_grid[qs[1] | ((qh[ib32+1] << 7) & 256)],
  8213. iq3xs_grid[qs[0] | ((qh[ib32+1] << 8) & 256)]);
  8214. qs += 8;
  8215. __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16));
  8216. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  8217. const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2);
  8218. const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1);
  8219. aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16));
  8220. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  8221. const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2);
  8222. const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2);
  8223. signs += 4;
  8224. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  8225. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  8226. const uint16_t ls1 = x[i].scales[ib32/2] & 0xf;
  8227. const uint16_t ls2 = x[i].scales[ib32/2] >> 4;
  8228. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  8229. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  8230. sumi1 = _mm256_add_epi32(sumi1, p1);
  8231. sumi2 = _mm256_add_epi32(sumi2, p2);
  8232. }
  8233. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  8234. }
  8235. *s = 0.25f * hsum_float_8(accumf);
#else
    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * restrict qs = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const uint8_t * restrict signs = x[i].signs;
        const int8_t * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
            const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
                const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            qs += 8;
            signs += 4;
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
                const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            qs += 8;
            signs += 4;
            bsum += sumi * ls2;
        }
        sumf += d * bsum;
    }
    *s = 0.25f * sumf;
#endif
}
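// Helper for the AVX2 paths below: multiplies two vectors of signed int8 values and adds
// adjacent pairs into int16 lanes. _mm256_maddubs_epi16 requires an unsigned first operand,
// so the sign of x is first transferred onto y (|x| * sign(x)*y == x*y).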
#ifdef __AVX2__
static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) {
    const __m256i ax = _mm256_sign_epi8(x, x);
    const __m256i sy = _mm256_sign_epi8(y, x);
    return _mm256_maddubs_epi16(ax, sy);
}
#endif
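// Dot product of a row of IQ1_S blocks with a row of Q8_K blocks.
// Each group of 8 weights is looked up in iq1s_grid; the 8 low index bits come from qs and
// the 9th bit from the scale byte (the 0x08/0x80 nibble flags). The remaining 3 bits of each
// scale nibble give the block scale, applied as (2*scale + 1).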
void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    const block_iq1_s * restrict x = vx;
    const block_q8_K * restrict y = vy;
    const int nb = n / QK_K;
    // TODO: implement for QK_K = 64
#if defined __ARM_NEON && QK_K == 256
  8299. const uint8x16_t m8 = vdupq_n_u8(0x08);
  8300. const uint8x16_t m7 = vdupq_n_u8(0x07);
  8301. const uint8x16_t m1 = vdupq_n_u8(0x01);
  8302. const int32x4_t vzero = vdupq_n_s32(0);
  8303. uint16_t gindex[8];
  8304. uint16x8x2_t vindex;
  8305. int8x16x4_t q1b;
  8306. ggml_int8x16x4_t q8b;
  8307. uint16x8x4_t scales;
  8308. int32x4x2_t sumi;
  8309. int32x4x2_t dotq;
  8310. float sumf = 0;
  8311. for (int i = 0; i < nb; ++i) {
  8312. const int8_t * q8 = y[i].qs;
  8313. const uint8_t * qs = x[i].qs;
  8314. const uint8_t * sc = x[i].scales;
  8315. sumi.val[0] = sumi.val[1] = vzero;
  8316. for (int i128 = 0; i128 < QK_K/128; ++i128) {
  8317. const uint8x16_t ql = vld1q_u8(qs); qs += 16;
  8318. const uint8x8_t tm1 = vld1_u8 (sc); sc += 8;
  8319. const uint8x8_t tm2 = vshr_n_u8(tm1, 4);
  8320. const uint8x16_t qh = vcombine_u8(vzip1_u8(tm1, tm2), vzip2_u8(tm1, tm2));
  8321. const uint8x16_t hbit = vandq_u8(qh, m8);
  8322. vindex.val[0] = vorrq_u16(vmovl_u8(vget_low_u8 (ql)), vshlq_n_u16(vmovl_u8(vget_low_u8 (hbit)), 5));
  8323. vindex.val[1] = vorrq_u16(vmovl_u8(vget_high_u8(ql)), vshlq_n_u16(vmovl_u8(vget_high_u8(hbit)), 5));
  8324. const uint8x16_t scales8 = vorrq_u8(vshlq_n_u8(vandq_u8(qh, m7), 1), m1);
  8325. scales.val[0] = vmovl_u8(vget_low_u8 (scales8));
  8326. scales.val[1] = vmovl_u8(vget_high_u8 (scales8));
  8327. for (int l = 0; l < 2; ++l) {
  8328. vst1q_u16(gindex+0, vindex.val[l]);
  8329. q1b.val[0] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[0])), vld1_s8((const void *)(iq1s_grid+gindex[1])));
  8330. q1b.val[1] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[2])), vld1_s8((const void *)(iq1s_grid+gindex[3])));
  8331. q1b.val[2] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[4])), vld1_s8((const void *)(iq1s_grid+gindex[5])));
  8332. q1b.val[3] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[6])), vld1_s8((const void *)(iq1s_grid+gindex[7])));
  8333. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8334. dotq.val[0] = vpaddq_s32(ggml_vdotq_s32(vzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(vzero, q1b.val[1], q8b.val[1]));
  8335. dotq.val[1] = vpaddq_s32(ggml_vdotq_s32(vzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(vzero, q1b.val[3], q8b.val[3]));
  8336. sumi.val[0] = vmlaq_s32(sumi.val[0], dotq.val[0], vreinterpretq_s32_u32(vmovl_u16(vget_low_u16 (scales.val[l]))));
  8337. sumi.val[1] = vmlaq_s32(sumi.val[1], dotq.val[1], vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales.val[l]))));
  8338. }
  8339. }
  8340. sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * vaddvq_s32(vaddq_s32(sumi.val[0], sumi.val[1]));
  8341. }
  8342. *s = sumf;
    // TODO: implement for QK_K = 64
#elif defined __AVX2__ && QK_K == 256
    const __m128i m8 = _mm_set1_epi8(0x08);
    const __m128i m7 = _mm_set1_epi8(0x07);
    const __m128i m1 = _mm_set1_epi8(0x01);
    const __m128i shuffle_h = _mm_set_epi8(15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0);
    const __m128i shuffle_s[4] = {
        _mm_set_epi32(0x03030303, 0x02020202, 0x01010101, 0x00000000),
        _mm_set_epi32(0x07070707, 0x06060606, 0x05050505, 0x04040404),
        _mm_set_epi32(0x0b0b0b0b, 0x0a0a0a0a, 0x09090909, 0x08080808),
        _mm_set_epi32(0x0f0f0f0f, 0x0e0e0e0e, 0x0d0d0d0d, 0x0c0c0c0c)
    };
    uint64_t aux64;
    typedef union m256i_uint16 {
        __m256i reg;
        uint16_t s[16];
    } m256i_uint16_t;
    m256i_uint16_t v_gindex;
    __m256 accum = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const int8_t * q8 = y[i].qs;
        const uint8_t * qs = x[i].qs;
        const uint8_t * sc = x[i].scales;
        __m256i sumi = _mm256_setzero_si256();
        for (int i128 = 0; i128 < QK_K/128; ++i128) {
            const __m128i ql = _mm_loadu_si128((const __m128i*)qs); qs += 16;
            memcpy(&aux64, sc, 8); sc += 8;
            const __m128i qh = _mm_shuffle_epi8(_mm_set_epi64x(aux64 >> 4, aux64), shuffle_h);
            const __m256i hbit = _mm256_cvtepu8_epi16(_mm_and_si128(qh, m8));
            v_gindex.reg = _mm256_or_si256(_mm256_cvtepu8_epi16(ql), _mm256_slli_epi16(hbit, 5));
            const __m128i scales = _mm_or_si128(_mm_slli_epi16(_mm_and_si128(qh, m7), 1), m1);
            for (int i32 = 0; i32 < 4; ++i32) {
                const __m256i q8b = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
                const __m256i q1b = _mm256_set_epi64x(iq1s_grid[v_gindex.s[4*i32+3]], iq1s_grid[v_gindex.s[4*i32+2]],
                                                      iq1s_grid[v_gindex.s[4*i32+1]], iq1s_grid[v_gindex.s[4*i32+0]]);
                const __m256i dot = mul_add_epi8(q1b, q8b);
                const __m256i s16 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, shuffle_s[i32]));
                const __m256i p = _mm256_madd_epi16(s16, dot);
                sumi = _mm256_add_epi32(sumi, p);
            }
        }
        accum = _mm256_fmadd_ps(_mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)), _mm256_cvtepi32_ps(sumi), accum);
    }
    *s = hsum_float_8(accum);
#else
    int db[4];
    uint16_t idx[4];
    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const int8_t * q8 = y[i].qs;
        const uint8_t * qs = x[i].qs;
        const uint8_t * sc = x[i].scales;
        int sumi = 0;
        for (int i32 = 0; i32 < QK_K/32; ++i32) {
            idx[0] = qs[0] | ((sc[0] & 0x08) << 5);
            idx[1] = qs[1] | ((sc[0] & 0x80) << 1);
            idx[2] = qs[2] | ((sc[1] & 0x08) << 5);
            idx[3] = qs[3] | ((sc[1] & 0x80) << 1);
            db[0] = (2*(sc[0] & 7) + 1);
            db[1] = (2*((sc[0] >> 4) & 7) + 1);
            db[2] = (2*(sc[1] & 7) + 1);
            db[3] = (2*((sc[1] >> 4) & 7) + 1);
            for (int l = 0; l < 4; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
                int suml = 0;
                for (int j = 0; j < 8; ++j) suml += q8[j] * grid[j];
                sumi += db[l] * suml;
                q8 += 8;
            }
            qs += 4;
            sc += 2;
        }
        sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * sumi;
    }
    *s = sumf;
#endif
}
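// Dot product of a row of IQ4_NL blocks with a row of Q8_0 blocks.
// IQ4_NL stores one 4-bit index per weight; indices are mapped to int8 values through the
// non-linear kvalues_iq4nl table, with one fp16 scale per block of QK4_NL weights.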
void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK4_NL == 0);
    static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");
    const block_iq4_nl * restrict x = vx;
    const block_q8_0 * restrict y = vy;
    const int nb = n / QK4_NL;
#if defined __ARM_NEON
    const int8x16_t values = vld1q_s8(kvalues_iq4nl);
    const uint8x16_t m4b = vdupq_n_u8(0x0f);
    uint8x16x2_t q4bits;
    int8x16x4_t q4b;
    int8x16x4_t q8b;
    int32x4_t prod_1, prod_2;
    float sumf = 0;
    for (int ib = 0; ib < nb; ib += 2) {
        q4bits.val[0] = vld1q_u8(x[ib+0].qs);
        q4bits.val[1] = vld1q_u8(x[ib+1].qs);
        q8b.val[0] = vld1q_s8(y[ib+0].qs);
        q8b.val[1] = vld1q_s8(y[ib+0].qs + 16);
        q8b.val[2] = vld1q_s8(y[ib+1].qs);
        q8b.val[3] = vld1q_s8(y[ib+1].qs + 16);
        q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
        q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
        q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
        q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
        prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
        prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
        sumf +=
            GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib+0].d) * vaddvq_s32(prod_1) +
            GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib+1].d) * vaddvq_s32(prod_2);
    }
    *s = sumf;
#elif defined __AVX2__
    const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl);
    const __m128i m4b = _mm_set1_epi8(0x0f);
    const __m256i mone = _mm256_set1_epi16(1);
    __m256 accum1 = _mm256_setzero_ps();
    __m256 accum2 = _mm256_setzero_ps();
    for (int ib = 0; ib < nb; ib += 2) {
        const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[0].qs);
        const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[1].qs);
        const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[0].qs);
        const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[1].qs);
        const __m256i q4b_1 = _mm256_set_m128i(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)),
                                               _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)));
        const __m256i q4b_2 = _mm256_set_m128i(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)),
                                               _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)));
        const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1);
        const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2);
        const __m256i p_1 = _mm256_madd_epi16(p16_1, mone);
        const __m256i p_2 = _mm256_madd_epi16(p16_2, mone);
        accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[0].d)*GGML_FP16_TO_FP32(x[0].d)),
                _mm256_cvtepi32_ps(p_1), accum1);
        accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[1].d)*GGML_FP16_TO_FP32(x[1].d)),
                _mm256_cvtepi32_ps(p_2), accum2);
        y += 2;
        x += 2;
    }
    *s = hsum_float_8(_mm256_add_ps(accum1, accum2));
#else
    float sumf = 0;
    for (int ib = 0; ib < nb; ++ib) {
        const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d);
        int sumi1 = 0, sumi2 = 0;
        for (int j = 0; j < QK4_NL/2; ++j) {
            sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
            sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
        }
        sumf += d * (sumi1 + sumi2);
    }
    *s = sumf;
#endif
}
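// Dot product of a row of IQ4_XS blocks with a row of Q8_K blocks.
// Same non-linear 4-bit values as IQ4_NL, but grouped into QK_K super-blocks with one fp16
// super-block scale and 6-bit per-block scales (low 4 bits in scales_l, high 2 bits in
// scales_h), stored with an offset of 32. For QK_K == 64 this falls back to the IQ4_NL kernel.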
void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK_K == 0);
#if QK_K == 64
    ggml_vec_dot_iq4_nl_q8_0(n, s, bs, vx, bx, vy, by, nrc);
#else
    const block_iq4_xs * restrict x = vx;
    const block_q8_K * restrict y = vy;
    const int nb = n / QK_K;
#if defined __ARM_NEON
    const int8x16_t values = vld1q_s8(kvalues_iq4nl);
    const uint8x16_t m4b = vdupq_n_u8(0x0f);
    ggml_uint8x16x2_t q4bits;
    ggml_int8x16x4_t q4b;
    ggml_int8x16x4_t q8b;
    int32x4_t prod_1, prod_2;
    float sumf = 0;
    for (int ibl = 0; ibl < nb; ++ibl) {
        const int8_t * q8 = y[ibl].qs;
        const uint8_t * q4 = x[ibl].qs;
        uint16_t h = x[ibl].scales_h;
        int sumi1 = 0, sumi2 = 0;
        for (int ib = 0; ib < QK_K/64; ++ib) {
            q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
            q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
            q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
            q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
            prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
            prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
            int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32;
            int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32;
            h >>= 4;
            sumi1 += vaddvq_s32(prod_1) * ls1;
            sumi2 += vaddvq_s32(prod_2) * ls2;
        }
        sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2);
    }
    *s = sumf;
#elif defined __AVX2__
    const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl);
    const __m128i m4b = _mm_set1_epi8(0x0f);
    __m256 accum = _mm256_setzero_ps();
    for (int ibl = 0; ibl < nb; ++ibl) {
        const uint8_t * qs = x[ibl].qs;
        const int8_t * q8 = y[ibl].qs;
        uint16_t sh = x[ibl].scales_h;
        __m256i sumi1 = _mm256_setzero_si256();
        __m256i sumi2 = _mm256_setzero_si256();
        for (int ib = 0; ib < QK_K/32; ib += 2) {
            const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16;
            const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16;
            const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q4b_1 = _mm256_set_m128i(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)),
                                                   _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)));
            const __m256i q4b_2 = _mm256_set_m128i(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)),
                                                   _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)));
            const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1);
            const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2);
            const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32;
            const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32;
            sh >>= 4;
            const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1));
            const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2));
            sumi1 = _mm256_add_epi32(p_1, sumi1);
            sumi2 = _mm256_add_epi32(p_2, sumi2);
        }
        accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d),
                _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum);
    }
    *s = hsum_float_8(accum);
#else
    float sumf = 0;
    for (int ibl = 0; ibl < nb; ++ibl) {
        const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
        uint16_t h = x[ibl].scales_h;
        const uint8_t * qs = x[ibl].qs;
        const int8_t * q8 = y[ibl].qs;
        for (int ib = 0; ib < QK_K/32; ib += 2) {
            const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
            const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
            h >>= 4;
            const float d1 = d4d8*(ls1 - 32);
            const float d2 = d4d8*(ls2 - 32);
            int sumi1 = 0, sumi2 = 0;
            for (int j = 0; j < 16; ++j) {
                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
            }
            sumf += d1 * (sumi1 + sumi2);
            qs += 16;
            q8 += 32;
            sumi1 = sumi2 = 0;
            for (int j = 0; j < 16; ++j) {
                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
            }
            sumf += d2 * (sumi1 + sumi2);
            qs += 16;
            q8 += 32;
        }
    }
    *s = sumf;
#endif
#endif
}
// ================================ IQ2 quantization =============================================

typedef struct {
    uint64_t * grid;
    int * map;
    uint16_t * neighbours;
} iq2_entry_t;

static iq2_entry_t iq2_data[4] = {
    {NULL, NULL, NULL},
    {NULL, NULL, NULL},
    {NULL, NULL, NULL},
    {NULL, NULL, NULL},
};

static inline int iq2_data_index(enum ggml_type type) {
    GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ2_S);
    return type == GGML_TYPE_IQ2_XXS ? 0 :
           type == GGML_TYPE_IQ2_XS ? 1 :
           type == GGML_TYPE_IQ1_S ? 2 : 3;
}

static inline int iq2_grid_size(enum ggml_type type) {
    GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ2_S);
    return type == GGML_TYPE_IQ2_XXS ? 256 :
           type == GGML_TYPE_IQ2_XS ? 512 :
           type == GGML_TYPE_IQ1_S ? 512 : 1024;
}

static int iq2_compare_func(const void * left, const void * right) {
    const int * l = (const int *)left;
    const int * r = (const int *)right;
    return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
}

void iq2xs_init_impl(enum ggml_type type) {
    const int gindex = iq2_data_index(type);
    const int grid_size = iq2_grid_size(type);
    if (iq2_data[gindex].grid) {
        return;
    }
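    // Packed codebooks for the IQ2_XXS (256 entries), IQ2_XS (512), IQ1_S (512) and
    // IQ2_S (1024) grids; each 16-bit entry packs the eight coordinates of one grid point,
    // two bits per coordinate, and is expanded by iq2xs_init_impl into the full uint64_t
    // lookup grid stored in iq2_data.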
  8645. static const uint16_t kgrid_2bit_256[256] = {
  8646. 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97,
  8647. 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642,
  8648. 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288,
  8649. 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113,
  8650. 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240,
  8651. 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400,
  8652. 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260,
  8653. 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872,
  8654. 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
  8655. 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
  8656. 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
  8657. 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
  8658. 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
  8659. 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
  8660. 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
  8661. 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
  8662. };
  8663. static const uint16_t kgrid_2bit_512[512] = {
  8664. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  8665. 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257,
  8666. 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340,
  8667. 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597,
  8668. 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096,
  8669. 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348,
  8670. 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065,
  8671. 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441,
  8672. 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160,
  8673. 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372,
  8674. 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125,
  8675. 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652,
  8676. 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197,
  8677. 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549,
  8678. 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894,
  8679. 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
  8680. 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
  8681. 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
  8682. 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
  8683. 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
  8684. 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
  8685. 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
  8686. 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
  8687. 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
  8688. 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
  8689. 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
  8690. 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
  8691. 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
  8692. 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
  8693. 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
  8694. 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
  8695. 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
  8696. };
  8697. static const uint16_t kgrid_1bit_512[512] = {
  8698. 10, 33, 41, 85, 132, 134, 160, 162, 277, 337, 340, 345, 357, 405, 516, 545,
  8699. 553, 598, 641, 650, 681, 1042, 1044, 1097, 1169, 1176, 1320, 1345, 1365, 1378, 1434, 1444,
  8700. 1545, 1617, 1642, 1685, 2053, 2080, 2089, 2133, 2176, 2182, 2208, 2214, 2306, 2384, 2393, 2440,
  8701. 2453, 2581, 2664, 2690, 2721, 4117, 4161, 4182, 4184, 4261, 4357, 4369, 4372, 4377, 4390, 4422,
  8702. 4432, 4437, 4449, 4457, 4485, 4497, 4505, 4629, 4677, 4696, 4774, 5205, 5217, 5225, 5386, 5397,
  8703. 5409, 5445, 5457, 5460, 5461, 5462, 5465, 5472, 5477, 5525, 5545, 5650, 5668, 5717, 5729, 5769,
  8704. 5777, 6212, 6234, 6244, 6293, 6424, 6482, 6485, 6502, 6505, 6529, 6538, 6565, 6656, 6682, 6788,
  8705. 6806, 6820, 8218, 8224, 8226, 8232, 8277, 8326, 8354, 8469, 8521, 8530, 8549, 8596, 8737, 8794,
  8706. 9221, 9253, 9348, 9369, 9380, 9474, 9557, 9633, 9732, 9753, 9793, 9830, 9862, 9880, 10240, 10272,
  8707. 10282, 10321, 10406, 10517, 10530, 10566, 10585, 10645, 10896, 16466, 16468, 16473, 16485, 16646, 16660, 16665,
  8708. 16725, 16793, 16806, 16914, 16969, 16977, 16996, 17028, 17057, 17408, 17416, 17434, 17493, 17512, 17578, 17685,
  8709. 17696, 17733, 17745, 17748, 17749, 17750, 17753, 17765, 17794, 17813, 17946, 17984, 18005, 18072, 18453, 18529,
  8710. 18569, 18722, 18756, 18762, 18773, 18794, 18833, 18853, 18945, 19026, 19033, 19077, 20489, 20497, 20500, 20517,
  8711. 20565, 20586, 20610, 20633, 20757, 20769, 20776, 20805, 20817, 20820, 20821, 20822, 20825, 20837, 20864, 20872,
  8712. 20885, 20896, 21002, 21029, 21077, 21146, 21510, 21525, 21573, 21585, 21588, 21589, 21590, 21593, 21605, 21653,
  8713. 21665, 21765, 21777, 21780, 21781, 21782, 21785, 21797, 21825, 21828, 21829, 21830, 21833, 21840, 21841, 21842,
  8714. 21844, 21846, 21848, 21849, 21850, 21857, 21860, 21861, 21862, 21865, 21893, 21905, 21908, 21909, 21910, 21913,
  8715. 21925, 22024, 22037, 22085, 22097, 22100, 22101, 22102, 22105, 22117, 22165, 22545, 22566, 22568, 22594, 22608,
  8716. 22613, 22676, 22697, 22793, 22805, 22853, 22865, 22868, 22869, 22870, 22873, 22885, 22933, 22946, 23046, 23072,
  8717. 23125, 23209, 24597, 24640, 24665, 24673, 24725, 24833, 24840, 24869, 24917, 24934, 24965, 25001, 25108, 25110,
  8718. 25152, 25184, 25192, 25234, 25616, 25618, 25625, 25685, 25704, 25738, 25744, 25770, 25877, 25897, 25925, 25937,
  8719. 25940, 25941, 25942, 25945, 25957, 25986, 26005, 26186, 26197, 26276, 26632, 26634, 26725, 26757, 26770, 26885,
  8720. 26965, 26976, 26986, 27032, 27153, 27174, 27200, 27208, 27240, 27269, 27282, 27290, 32778, 32800, 32802, 32808,
  8721. 32810, 32853, 32904, 32922, 32930, 32932, 33105, 33110, 33112, 33125, 33157, 33280, 33288, 33301, 33312, 33320,
  8722. 33424, 33797, 33829, 33858, 34068, 34133, 34146, 34176, 34217, 34306, 34342, 34441, 34454, 34468, 34832, 34918,
  8723. 34965, 34984, 35094, 35137, 35161, 35208, 35232, 35332, 35338, 35368, 35429, 36932, 36934, 36953, 37009, 37125,
  8724. 37136, 37138, 37145, 37157, 37205, 37220, 37258, 37290, 37444, 37446, 37465, 37478, 37525, 37905, 37968, 37973,
  8725. 38040, 38054, 38145, 38154, 38165, 38180, 38186, 38213, 38225, 38228, 38229, 38230, 38233, 38245, 38293, 38485,
  8726. 38504, 38530, 38938, 38985, 38993, 39012, 39040, 39173, 39192, 39253, 39265, 39301, 39316, 39322, 39442, 39497,
  8727. 39504, 39590, 40970, 40984, 40992, 41002, 41045, 41120, 41128, 41237, 41289, 41297, 41317, 41364, 41366, 41514,
  8728. 41557, 41633, 41989, 42021, 42056, 42068, 42074, 42113, 42242, 42265, 42274, 42325, 42340, 42402, 42501, 42512,
  8729. 42533, 42624, 42632, 42666, 43040, 43093, 43106, 43168, 43176, 43264, 43286, 43345, 43429, 43590, 43618, 43680,
  8730. };
  8731. static const uint16_t kgrid_2bit_1024[1024] = {
  8732. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  8733. 73, 80, 82, 85, 88, 97, 100, 102, 105, 128, 130, 133, 136, 145, 148, 160,
  8734. 165, 170, 257, 260, 262, 265, 272, 274, 277, 280, 289, 292, 320, 322, 325, 328,
  8735. 337, 340, 342, 345, 352, 357, 360, 385, 388, 400, 402, 405, 417, 420, 512, 514,
  8736. 517, 520, 529, 532, 544, 554, 577, 580, 582, 585, 592, 597, 640, 645, 650, 660,
  8737. 674, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1062, 1065, 1088, 1090, 1093,
  8738. 1096, 1098, 1105, 1108, 1110, 1113, 1120, 1122, 1125, 1153, 1156, 1158, 1161, 1168, 1173, 1176,
  8739. 1185, 1188, 1280, 1282, 1285, 1288, 1290, 1297, 1300, 1302, 1305, 1312, 1317, 1320, 1345, 1348,
  8740. 1350, 1353, 1360, 1362, 1365, 1368, 1377, 1380, 1408, 1410, 1413, 1416, 1425, 1428, 1440, 1537,
  8741. 1540, 1542, 1545, 1552, 1557, 1600, 1605, 1608, 1617, 1620, 1632, 1665, 1668, 1680, 2048, 2050,
  8742. 2053, 2056, 2065, 2068, 2070, 2073, 2080, 2085, 2090, 2113, 2116, 2118, 2121, 2128, 2130, 2133,
  8743. 2136, 2145, 2148, 2176, 2181, 2196, 2218, 2305, 2308, 2320, 2322, 2325, 2328, 2337, 2368, 2373,
  8744. 2376, 2385, 2388, 2400, 2433, 2448, 2560, 2577, 2580, 2594, 2600, 2602, 2640, 2713, 4097, 4100,
  8745. 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4134, 4160, 4162, 4165, 4168, 4177, 4180, 4182,
  8746. 4185, 4192, 4194, 4197, 4200, 4225, 4228, 4230, 4240, 4245, 4248, 4257, 4260, 4352, 4354, 4357,
  8747. 4360, 4362, 4369, 4372, 4374, 4377, 4384, 4386, 4389, 4392, 4417, 4420, 4422, 4425, 4432, 4434,
  8748. 4437, 4440, 4449, 4452, 4480, 4482, 4485, 4488, 4497, 4500, 4609, 4612, 4617, 4624, 4629, 4641,
  8749. 4644, 4672, 4677, 4689, 4692, 4737, 4740, 4752, 5120, 5122, 5125, 5128, 5137, 5140, 5142, 5145,
  8750. 5152, 5157, 5160, 5185, 5188, 5190, 5193, 5200, 5202, 5205, 5208, 5217, 5220, 5248, 5250, 5253,
  8751. 5256, 5265, 5268, 5280, 5377, 5380, 5382, 5385, 5392, 5394, 5397, 5400, 5409, 5412, 5440, 5442,
  8752. 5445, 5448, 5457, 5460, 5472, 5505, 5508, 5520, 5632, 5637, 5640, 5649, 5652, 5664, 5697, 5700,
  8753. 5712, 5760, 5802, 6145, 6148, 6150, 6153, 6160, 6165, 6168, 6177, 6208, 6210, 6213, 6216, 6225,
  8754. 6228, 6240, 6273, 6276, 6400, 6402, 6405, 6408, 6417, 6420, 6432, 6465, 6468, 6480, 6505, 6562,
  8755. 6660, 6672, 6720, 6742, 8192, 8194, 8197, 8200, 8209, 8212, 8214, 8217, 8224, 8229, 8234, 8257,
  8756. 8260, 8272, 8274, 8277, 8292, 8320, 8330, 8340, 8362, 8449, 8452, 8464, 8466, 8469, 8481, 8512,
  8757. 8514, 8517, 8529, 8532, 8544, 8577, 8580, 8592, 8704, 8714, 8738, 8744, 8746, 8772, 8784, 8840,
  8758. 8842, 8872, 9217, 9220, 9222, 9225, 9232, 9237, 9240, 9249, 9252, 9280, 9282, 9285, 9288, 9297,
  8759. 9300, 9312, 9345, 9348, 9360, 9472, 9477, 9480, 9489, 9492, 9504, 9537, 9540, 9552, 9574, 9600,
  8760. 9729, 9732, 9744, 9792, 9817, 10240, 10245, 10257, 10260, 10305, 10308, 10320, 10378, 10410, 10497, 10500,
  8761. 10512, 10645, 10762, 10786, 10852, 10888, 10890, 16385, 16388, 16390, 16393, 16400, 16402, 16405, 16408, 16410,
  8762. 16417, 16420, 16422, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16470, 16473, 16480, 16482, 16485, 16513,
  8763. 16516, 16528, 16533, 16536, 16545, 16548, 16640, 16642, 16645, 16648, 16657, 16660, 16662, 16665, 16672, 16674,
  8764. 16677, 16705, 16708, 16710, 16713, 16720, 16722, 16725, 16728, 16737, 16740, 16768, 16770, 16773, 16776, 16785,
  8765. 16788, 16800, 16897, 16900, 16912, 16914, 16917, 16920, 16932, 16960, 16965, 16968, 16977, 16980, 16992, 17025,
  8766. 17028, 17408, 17410, 17413, 17416, 17418, 17425, 17428, 17430, 17433, 17440, 17442, 17445, 17448, 17473, 17476,
  8767. 17478, 17481, 17488, 17490, 17493, 17496, 17505, 17508, 17536, 17538, 17541, 17544, 17553, 17556, 17568, 17665,
  8768. 17668, 17670, 17673, 17680, 17682, 17685, 17688, 17697, 17700, 17728, 17730, 17733, 17736, 17745, 17748, 17760,
  8769. 17770, 17793, 17796, 17808, 17920, 17922, 17925, 17928, 17937, 17940, 17952, 17985, 17988, 18000, 18048, 18085,
  8770. 18433, 18436, 18441, 18448, 18450, 18453, 18456, 18465, 18468, 18496, 18498, 18501, 18504, 18513, 18516, 18528,
  8771. 18564, 18576, 18688, 18690, 18693, 18696, 18705, 18708, 18720, 18753, 18756, 18768, 18816, 18838, 18945, 18948,
  8772. 18960, 19008, 20480, 20482, 20485, 20488, 20497, 20500, 20502, 20505, 20512, 20514, 20517, 20520, 20545, 20548,
  8773. 20550, 20553, 20560, 20562, 20565, 20568, 20577, 20580, 20608, 20610, 20613, 20616, 20625, 20628, 20737, 20740,
  8774. 20742, 20745, 20752, 20754, 20757, 20760, 20769, 20772, 20800, 20802, 20805, 20808, 20817, 20820, 20832, 20865,
  8775. 20868, 20880, 20992, 20997, 21000, 21009, 21012, 21024, 21057, 21060, 21072, 21097, 21120, 21505, 21508, 21510,
  8776. 21513, 21520, 21522, 21525, 21528, 21537, 21540, 21568, 21570, 21573, 21576, 21585, 21588, 21600, 21633, 21636,
  8777. 21648, 21760, 21762, 21765, 21768, 21777, 21780, 21792, 21825, 21828, 21840, 21888, 22017, 22020, 22032, 22054,
  8778. 22080, 22528, 22530, 22533, 22536, 22545, 22548, 22560, 22593, 22596, 22608, 22618, 22656, 22785, 22788, 22800,
  8779. 22848, 23040, 23065, 23173, 23208, 24577, 24580, 24582, 24592, 24594, 24597, 24600, 24609, 24612, 24640, 24645,
  8780. 24648, 24657, 24660, 24672, 24708, 24720, 24832, 24834, 24837, 24840, 24849, 24852, 24864, 24897, 24900, 24912,
  8781. 24960, 24985, 25092, 25104, 25152, 25174, 25249, 25600, 25605, 25608, 25617, 25620, 25632, 25665, 25668, 25680,
  8782. 25728, 25857, 25860, 25872, 25920, 25930, 25960, 26002, 26112, 26260, 26625, 26628, 26640, 26725, 26776, 26880,
  8783. 26922, 27202, 27297, 32768, 32770, 32773, 32776, 32785, 32788, 32793, 32800, 32805, 32833, 32836, 32848, 32850,
  8784. 32853, 32856, 32865, 32896, 32901, 32913, 32916, 33025, 33028, 33033, 33040, 33042, 33045, 33048, 33057, 33060,
  8785. 33088, 33090, 33093, 33096, 33105, 33108, 33153, 33156, 33168, 33193, 33280, 33285, 33290, 33297, 33300, 33345,
  8786. 33348, 33360, 33793, 33796, 33798, 33801, 33808, 33810, 33813, 33816, 33825, 33856, 33858, 33861, 33864, 33873,
  8787. 33876, 33888, 33921, 33924, 33936, 34048, 34050, 34053, 34056, 34065, 34068, 34080, 34113, 34116, 34128, 34176,
  8788. 34186, 34305, 34308, 34320, 34345, 34368, 34816, 34821, 34833, 34836, 34881, 34884, 34896, 34978, 35073, 35076,
  8789. 35136, 35173, 35362, 35416, 35418, 35458, 35490, 36865, 36868, 36873, 36880, 36882, 36885, 36888, 36900, 36928,
  8790. 36930, 36933, 36936, 36945, 36948, 36960, 36993, 36996, 37008, 37120, 37125, 37137, 37140, 37185, 37188, 37200,
  8791. 37210, 37377, 37380, 37392, 37440, 37542, 37888, 37890, 37893, 37896, 37905, 37908, 37920, 37953, 37956, 37968,
  8792. 38016, 38038, 38145, 38148, 38160, 38208, 38296, 38305, 38400, 38470, 38500, 38913, 38916, 38928, 38950, 38976,
  8793. 39081, 39168, 39241, 39250, 39568, 40960, 40965, 40970, 40980, 40994, 41002, 41025, 41028, 41040, 41122, 41130,
  8794. 41280, 41317, 41474, 41482, 41506, 41512, 41514, 41602, 41608, 41610, 41640, 41985, 41988, 42000, 42048, 42121,
  8795. 42148, 42240, 42265, 42577, 43018, 43048, 43170, 43348, 43398, 43528, 43530, 43552, 43554, 43560, 43656, 43690,
  8796. };
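// Each grid entry packs one lattice point into a uint16_t: 8 coordinates of 2 bits each,
// decoded below as pos[i] = 2*((kgrid[k] >> 2*i) & 0x3) + 1, i.e. odd values in {1,3,5,7}.
// For example, entry 10 (binary 1010) decodes to the point {5, 5, 1, 1, 1, 1, 1, 1}.
// kmap_size only needs to cover packed indices the quantizers below can actually produce:
// they clamp quants to 0..kMaxQ-1 = 2, so the largest index is 0xAAAA = 43690, which is
// presumably why 43692 entries are used instead of the full 1 << 16. nwant is the number of
// distinct squared distances ("shells") of nearest grid points kept as neighbour candidates
// for each off-grid index.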
  8797. const int kmap_size = 43692;
  8798. //const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2;
  8799. const int nwant = type == GGML_TYPE_IQ1_S ? 3 : type == GGML_TYPE_IQ2_S ? 1 : 2;
  8800. const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 :
  8801. type == GGML_TYPE_IQ2_XS ? kgrid_2bit_512 :
  8802. type == GGML_TYPE_IQ1_S ? kgrid_1bit_512 : kgrid_2bit_1024;
  8803. uint64_t * kgrid_q2xs;
  8804. int * kmap_q2xs;
  8805. uint16_t * kneighbors_q2xs;
  8806. printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
  8807. uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
  8808. for (int k = 0; k < grid_size; ++k) {
  8809. int8_t * pos = (int8_t *)(the_grid + k);
  8810. for (int i = 0; i < 8; ++i) {
  8811. int l = (kgrid[k] >> 2*i) & 0x3;
  8812. pos[i] = 2*l + 1;
  8813. }
  8814. }
  8815. kgrid_q2xs = the_grid;
  8816. iq2_data[gindex].grid = the_grid;
  8817. kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
  8818. iq2_data[gindex].map = kmap_q2xs;
  8819. for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
  8820. uint64_t aux64;
  8821. uint8_t * aux8 = (uint8_t *)&aux64;
  8822. for (int i = 0; i < grid_size; ++i) {
  8823. aux64 = kgrid_q2xs[i];
  8824. uint16_t index = 0;
  8825. for (int k=0; k<8; ++k) {
  8826. uint16_t q = (aux8[k] - 1)/2;
  8827. index |= (q << 2*k);
  8828. }
  8829. kmap_q2xs[index] = i;
  8830. }
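// Build neighbour lists for every packed index that is not itself a grid point. Two passes:
// the first only counts how many neighbours will be kept (all grid points within the nwant
// closest distinct squared distances), the second fills kneighbors_q2xs with
// [count, grid indices...] per off-grid index and stores the negated, 1-based offset of that
// list in kmap_q2xs, so kmap_q2xs[i] < 0 means "off grid, neighbour list starts at
// kneighbors_q2xs[-kmap_q2xs[i] - 1]".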
  8831. int8_t pos[8];
  8832. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  8833. int num_neighbors = 0, num_not_in_map = 0;
  8834. for (int i = 0; i < kmap_size; ++i) {
  8835. if (kmap_q2xs[i] >= 0) continue;
  8836. ++num_not_in_map;
  8837. for (int k = 0; k < 8; ++k) {
  8838. int l = (i >> 2*k) & 0x3;
  8839. pos[k] = 2*l + 1;
  8840. }
  8841. for (int j = 0; j < grid_size; ++j) {
  8842. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  8843. int d2 = 0;
  8844. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  8845. dist2[2*j+0] = d2;
  8846. dist2[2*j+1] = j;
  8847. }
  8848. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  8849. int n = 0; int d2 = dist2[0];
  8850. int nhave = 1;
  8851. for (int j = 0; j < grid_size; ++j) {
  8852. if (dist2[2*j] > d2) {
  8853. if (nhave == nwant) break;
  8854. d2 = dist2[2*j];
  8855. ++nhave;
  8856. }
  8857. ++n;
  8858. }
  8859. num_neighbors += n;
  8860. }
  8861. printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  8862. kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  8863. iq2_data[gindex].neighbours = kneighbors_q2xs;
  8864. int counter = 0;
  8865. for (int i = 0; i < kmap_size; ++i) {
  8866. if (kmap_q2xs[i] >= 0) continue;
  8867. for (int k = 0; k < 8; ++k) {
  8868. int l = (i >> 2*k) & 0x3;
  8869. pos[k] = 2*l + 1;
  8870. }
  8871. for (int j = 0; j < grid_size; ++j) {
  8872. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  8873. int d2 = 0;
  8874. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  8875. dist2[2*j+0] = d2;
  8876. dist2[2*j+1] = j;
  8877. }
  8878. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  8879. kmap_q2xs[i] = -(counter + 1);
  8880. int d2 = dist2[0];
  8881. uint16_t * start = &kneighbors_q2xs[counter++];
  8882. int n = 0, nhave = 1;
  8883. for (int j = 0; j < grid_size; ++j) {
  8884. if (dist2[2*j] > d2) {
  8885. if (nhave == nwant) break;
  8886. d2 = dist2[2*j];
  8887. ++nhave;
  8888. }
  8889. kneighbors_q2xs[counter++] = dist2[2*j+1];
  8890. ++n;
  8891. }
  8892. *start = n;
  8893. }
  8894. free(dist2);
  8895. }
  8896. void iq2xs_free_impl(enum ggml_type type) {
  8897. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ2_S);
  8898. const int gindex = iq2_data_index(type);
  8899. if (iq2_data[gindex].grid) {
  8900. free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL;
  8901. free(iq2_data[gindex].map); iq2_data[gindex].map = NULL;
  8902. free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
  8903. }
  8904. }
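// Given the neighbour list of an off-grid point, pick the grid point that minimizes the
// weighted squared error sum_i weight[i]*(scale*g[i] - xval[i])^2 for the given (fixed) scale,
// and write the corresponding 2-bit quants back into L.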
  8905. static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  8906. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  8907. int num_neighbors = neighbours[0];
  8908. GGML_ASSERT(num_neighbors > 0);
  8909. float best_d2 = FLT_MAX;
  8910. int grid_index = -1;
  8911. for (int j = 1; j <= num_neighbors; ++j) {
  8912. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  8913. float d2 = 0;
  8914. for (int i = 0; i < 8; ++i) {
  8915. float q = pg[i];
  8916. float diff = scale*q - xval[i];
  8917. d2 += weight[i]*diff*diff;
  8918. }
  8919. if (d2 < best_d2) {
  8920. best_d2 = d2; grid_index = neighbours[j];
  8921. }
  8922. }
  8923. GGML_ASSERT(grid_index >= 0);
  8924. const int8_t * pg = (const int8_t *)(grid + grid_index);
  8925. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  8926. return grid_index;
  8927. }
  8928. static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  8929. const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS);
  8930. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  8931. const int * kmap_q2xs = iq2_data[gindex].map;
  8932. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  8933. GGML_ASSERT(quant_weights && "missing quantization weights");
  8934. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  8935. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  8936. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  8937. GGML_ASSERT(n%QK_K == 0);
  8938. const int kMaxQ = 3;
  8939. const int nbl = n/QK_K;
  8940. block_iq2_xxs * y = vy;
  8941. float scales[QK_K/32];
  8942. float weight[32];
  8943. float xval[32];
  8944. int8_t L[32];
  8945. int8_t Laux[32];
  8946. float waux[32];
  8947. uint8_t block_signs[4];
  8948. uint32_t q2[2*(QK_K/32)];
  8949. for (int ibl = 0; ibl < nbl; ++ibl) {
  8950. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  8951. memset(q2, 0, QK_K/4);
  8952. float max_scale = 0;
  8953. const float * xbl = x + QK_K*ibl;
  8954. float sumx2 = 0;
  8955. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  8956. float sigma2 = sumx2/QK_K;
  8957. for (int ib = 0; ib < QK_K/32; ++ib) {
  8958. const float * xb = xbl + 32*ib;
  8959. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  8960. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  8961. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
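// Work on absolute values and track the signs separately. Only 7 sign bits are stored per
// group of 8 values; the convention is that every group has an even number of negative
// values, so the 8th sign is the parity of the other 7. If the count of negatives is odd,
// flip the sign of the value that contributes least to the weighted error.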
  8962. for (int k = 0; k < 4; ++k) {
  8963. int nflip = 0;
  8964. uint8_t s = 0;
  8965. for (int i = 0; i < 8; ++i) {
  8966. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  8967. else {
  8968. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  8969. }
  8970. }
  8971. if (nflip%2) {
  8972. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  8973. for (int i = 1; i < 8; ++i) {
  8974. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  8975. if (ax < min) {
  8976. min = ax; imin = i;
  8977. }
  8978. }
  8979. xval[8*k+imin] = -xval[8*k+imin];
  8980. s ^= (1 << imin);
  8981. }
  8982. block_signs[k] = s & 127;
  8983. }
  8984. float max = xval[0];
  8985. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  8986. if (!max) {
  8987. scales[ib] = 0;
  8988. memset(L, 0, 32);
  8989. continue;
  8990. }
  8991. float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight);
  8992. float eff_max = scale*kMaxQ;
  8993. float best = 0;
  8994. for (int is = -6; is <= 6; ++is) {
  8995. float id = (2*kMaxQ-1+is*0.1f)/eff_max;
  8996. float this_scale = 1/id;
  8997. for (int k = 0; k < 4; ++k) {
  8998. for (int i = 0; i < 8; ++i) {
  8999. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  9000. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  9001. }
  9002. uint16_t u = 0;
  9003. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  9004. int grid_index = kmap_q2xs[u];
  9005. if (grid_index < 0) {
  9006. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9007. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  9008. }
  9009. }
  9010. float sumqx = 0, sumq2 = 0;
  9011. for (int i = 0; i < 32; ++i) {
  9012. float w = weight[i];
  9013. float q = 2*Laux[i] + 1;
  9014. sumqx += w*xval[i]*q;
  9015. sumq2 += w*q*q;
  9016. }
  9017. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  9018. scale = sumqx/sumq2; best = scale*sumqx;
  9019. memcpy(L, Laux, 32);
  9020. }
  9021. }
  9022. if (scale > 0) {
  9023. float id = 1/scale;
  9024. for (int k = 0; k < 4; ++k) {
  9025. uint16_t u = 0;
  9026. for (int i = 0; i < 8; ++i) {
  9027. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  9028. l = MAX(0, MIN(kMaxQ-1, l));
  9029. u |= (l << 2*i);
  9030. }
  9031. int grid_index = kmap_q2xs[u];
  9032. if (grid_index < 0) {
  9033. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9034. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  9035. }
  9036. const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
  9037. for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
  9038. }
  9039. float sumqx = 0, sumq2 = 0;
  9040. for (int i = 0; i < 32; ++i) {
  9041. float w = weight[i];
  9042. float q = 2*L[i] + 1;
  9043. sumqx += w*xval[i]*q;
  9044. sumq2 += w*q*q;
  9045. }
  9046. if (sumq2 > 0) scale = sumqx/sumq2;
  9047. }
  9048. if (scale < 0) {
9049. // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale)
  9050. // and correspondingly flip quant signs.
  9051. scale = -scale;
  9052. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  9053. }
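// Pack the result for this 32-value block: q2[2*ib+0] holds the four 8-bit grid indices
// (one per group of 8 values), q2[2*ib+1] holds the four 7-bit sign masks in bits 0..27;
// the 4-bit block scale goes into bits 28..31 further below.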
  9054. for (int k = 0; k < 4; ++k) {
  9055. uint16_t u = 0;
  9056. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  9057. int grid_index = kmap_q2xs[u];
  9058. if (grid_index < 0) {
  9059. printf("Oops: found point %u not on grid:", u);
  9060. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  9061. printf("\n");
  9062. GGML_ASSERT(false);
  9063. }
  9064. q2[2*ib+0] |= (grid_index << 8*k);
  9065. q2[2*ib+1] |= (block_signs[k] << 7*k);
  9066. }
  9067. GGML_ASSERT(scale >= 0);
  9068. scales[ib] = scale;
  9069. max_scale = MAX(max_scale, scale);
  9070. }
  9071. if (!max_scale) {
  9072. memset(y[ibl].qs, 0, QK_K/4);
  9073. continue;
  9074. }
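// Encode the block scales relative to a shared super-block scale d: with d = max_scale/31,
// each block scale is stored as a 4-bit value l such that scales[ib] ~ d*(2*l + 1), so the
// largest block scale maps to l = 15.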
  9075. float d = max_scale/31;
  9076. y[ibl].d = GGML_FP32_TO_FP16(d);
  9077. float id = 1/d;
  9078. for (int ib = 0; ib < QK_K/32; ++ib) {
  9079. int l = nearest_int(0.5f*(id*scales[ib]-1));
  9080. l = MAX(0, MIN(15, l));
  9081. q2[2*ib+1] |= ((uint32_t)l << 28);
  9082. }
  9083. memcpy(y[ibl].qs, q2, QK_K/4);
  9084. }
  9085. }
  9086. static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  9087. const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS);
  9088. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  9089. const int * kmap_q2xs = iq2_data[gindex].map;
  9090. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  9091. GGML_ASSERT(quant_weights && "missing quantization weights");
  9092. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  9093. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  9094. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  9095. GGML_ASSERT(n%QK_K == 0);
  9096. const int kMaxQ = 3;
  9097. const int nbl = n/QK_K;
  9098. block_iq2_xs * y = vy;
  9099. float scales[QK_K/16];
  9100. float weight[16];
  9101. float xval[16];
  9102. int8_t L[16];
  9103. int8_t Laux[16];
  9104. float waux[16];
  9105. bool is_on_grid[2];
  9106. bool is_on_grid_aux[2];
  9107. uint8_t block_signs[2];
  9108. uint16_t q2[2*(QK_K/16)];
  9109. for (int ibl = 0; ibl < nbl; ++ibl) {
  9110. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  9111. memset(q2, 0, QK_K/4);
  9112. memset(y[ibl].scales, 0, QK_K/32);
  9113. float max_scale = 0;
  9114. const float * xbl = x + QK_K*ibl;
  9115. float sumx2 = 0;
  9116. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  9117. float sigma2 = sumx2/QK_K;
  9118. for (int ib = 0; ib < QK_K/16; ++ib) {
  9119. const float * xb = xbl + 16*ib;
  9120. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  9121. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  9122. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  9123. for (int k = 0; k < 2; ++k) {
  9124. int nflip = 0;
  9125. uint8_t s = 0;
  9126. for (int i = 0; i < 8; ++i) {
  9127. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  9128. else {
  9129. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  9130. }
  9131. }
  9132. if (nflip%2) {
  9133. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  9134. for (int i = 1; i < 8; ++i) {
  9135. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  9136. if (ax < min) {
  9137. min = ax; imin = i;
  9138. }
  9139. }
  9140. xval[8*k+imin] = -xval[8*k+imin];
  9141. s ^= (1 << imin);
  9142. }
  9143. block_signs[k] = s & 127;
  9144. }
  9145. float max = xval[0];
  9146. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  9147. if (!max) {
  9148. scales[ib] = 0;
  9149. memset(L, 0, 16);
  9150. continue;
  9151. }
  9152. float best = 0;
  9153. float scale = max/(2*kMaxQ-1);
  9154. is_on_grid[0] = is_on_grid[1] = true;
  9155. for (int is = -9; is <= 9; ++is) {
  9156. float id = (2*kMaxQ-1+is*0.1f)/max;
  9157. float this_scale = 1/id;
  9158. for (int k = 0; k < 2; ++k) {
  9159. for (int i = 0; i < 8; ++i) {
  9160. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  9161. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  9162. }
  9163. uint16_t u = 0;
  9164. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  9165. int grid_index = kmap_q2xs[u];
  9166. is_on_grid_aux[k] = true;
  9167. if (grid_index < 0) {
  9168. is_on_grid_aux[k] = false;
  9169. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9170. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  9171. }
  9172. }
  9173. float sumqx = 0, sumq2 = 0;
  9174. for (int i = 0; i < 16; ++i) {
  9175. float w = weight[i];
  9176. float q = 2*Laux[i] + 1;
  9177. sumqx += w*xval[i]*q;
  9178. sumq2 += w*q*q;
  9179. }
  9180. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  9181. scale = sumqx/sumq2; best = scale*sumqx;
  9182. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  9183. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  9184. }
  9185. }
  9186. int n_not_ongrid = 0;
  9187. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  9188. if (n_not_ongrid > 0 && scale > 0) {
  9189. float id = 1/scale;
  9190. for (int k = 0; k < 2; ++k) {
  9191. if (is_on_grid[k]) continue;
  9192. uint16_t u = 0;
  9193. for (int i = 0; i < 8; ++i) {
  9194. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  9195. l = MAX(0, MIN(kMaxQ-1, l));
  9196. u |= (l << 2*i);
  9197. L[8*k + i] = l;
  9198. }
  9199. int grid_index = kmap_q2xs[u];
  9200. if (grid_index < 0) {
  9201. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9202. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  9203. }
  9204. }
  9205. float sumqx = 0, sumq2 = 0;
  9206. for (int i = 0; i < 16; ++i) {
  9207. float w = weight[i];
  9208. float q = 2*L[i] + 1;
  9209. sumqx += w*xval[i]*q;
  9210. sumq2 += w*q*q;
  9211. }
  9212. if (sumq2 > 0) scale = sumqx/sumq2;
  9213. }
  9214. if (scale < 0) {
  9215. scale = -scale;
  9216. for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
  9217. }
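// For IQ2_XS each 16-value block yields two 16-bit words: bits 0..8 hold the 9-bit index
// into the 512-entry grid, bits 9..15 the 7-bit sign mask of the corresponding group of 8.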
  9218. for (int k = 0; k < 2; ++k) {
  9219. uint16_t u = 0;
  9220. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  9221. int grid_index = kmap_q2xs[u];
  9222. if (grid_index < 0) {
  9223. printf("Oops: found point %u not on grid:", u);
  9224. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  9225. printf("\n");
  9226. GGML_ASSERT(false);
  9227. }
  9228. q2[2*ib+k] = grid_index | (block_signs[k] << 9);
  9229. }
  9230. GGML_ASSERT(scale >= 0);
  9231. scales[ib] = scale;
  9232. max_scale = MAX(max_scale, scale);
  9233. }
  9234. if (!max_scale) {
  9235. memset(y[ibl].qs, 0, QK_K/4);
  9236. continue;
  9237. }
  9238. float d = max_scale/31;
  9239. y[ibl].d = GGML_FP32_TO_FP16(d);
  9240. float id = 1/d;
  9241. for (int ib = 0; ib < QK_K/16; ++ib) {
  9242. int l = nearest_int(0.5f*(id*scales[ib]-1));
  9243. l = MAX(0, MIN(15, l));
  9244. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  9245. else y[ibl].scales[ib/2] |= (l << 4);
  9246. }
  9247. memcpy(y[ibl].qs, q2, QK_K/4);
  9248. }
  9249. }
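// Row-wise drivers: each row of n_per_row values is quantized independently and its
// n_per_row/QK_K blocks are written back to back into dst. The hist argument is accepted
// but unused.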
  9250. size_t quantize_iq2_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  9251. (void)hist;
  9252. GGML_ASSERT(n_per_row%QK_K == 0);
  9253. int nblock = n_per_row/QK_K;
  9254. char * qrow = (char *)dst;
  9255. for (int row = 0; row < nrow; ++row) {
  9256. quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
  9257. src += n_per_row;
  9258. qrow += nblock*sizeof(block_iq2_xxs);
  9259. }
  9260. return nrow * nblock * sizeof(block_iq2_xxs);
  9261. }
  9262. size_t quantize_iq2_xs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  9263. (void)hist;
  9264. GGML_ASSERT(n_per_row%QK_K == 0);
  9265. int nblock = n_per_row/QK_K;
  9266. char * qrow = (char *)dst;
  9267. for (int row = 0; row < nrow; ++row) {
  9268. quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
  9269. src += n_per_row;
  9270. qrow += nblock*sizeof(block_iq2_xs);
  9271. }
  9272. return nrow * nblock * sizeof(block_iq2_xs);
  9273. }
  9274. //
  9275. // ============================================= 3-bit using D4 lattice
  9276. //
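// Same idea as the 2-bit case above, but in 4 dimensions: each grid point packs 4 quant
// values of 3 bits each into a uint16_t, decoded as 2*l + 1 (odd values 1..15). The
// 256-entry grid is used for IQ3_XXS, the 512-entry grid for IQ3_S.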
  9277. typedef struct {
  9278. uint32_t * grid;
  9279. int * map;
  9280. uint16_t * neighbours;
  9281. } iq3_entry_t;
  9282. static iq3_entry_t iq3_data[2] = {
  9283. {NULL, NULL, NULL},
  9284. {NULL, NULL, NULL},
  9285. };
  9286. static inline int iq3_data_index(int grid_size) {
  9287. (void)grid_size;
  9288. GGML_ASSERT(grid_size == 256 || grid_size == 512);
  9289. return grid_size == 256 ? 0 : 1;
  9290. }
  9291. static int iq3_compare_func(const void * left, const void * right) {
  9292. const int * l = (const int *)left;
  9293. const int * r = (const int *)right;
  9294. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  9295. }
  9296. void iq3xs_init_impl(int grid_size) {
  9297. const int gindex = iq3_data_index(grid_size);
  9298. if (iq3_data[gindex].grid) {
  9299. return;
  9300. }
  9301. static const uint16_t kgrid_256[256] = {
  9302. 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74,
  9303. 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159,
  9304. 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321,
  9305. 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531,
  9306. 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664,
  9307. 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978,
  9308. 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105,
  9309. 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228,
  9310. 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553,
  9311. 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722,
  9312. 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063,
  9313. 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389,
  9314. 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746,
  9315. 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153,
  9316. 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610,
  9317. 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992,
  9318. };
  9319. static const uint16_t kgrid_512[512] = {
  9320. 0, 1, 2, 5, 7, 8, 9, 10, 12, 14, 16, 17, 21, 27, 32, 34,
  9321. 37, 39, 41, 43, 48, 50, 57, 60, 63, 64, 65, 66, 68, 72, 73, 77,
  9322. 80, 83, 87, 89, 93, 100, 113, 117, 122, 128, 129, 133, 135, 136, 139, 142,
  9323. 145, 149, 152, 156, 162, 165, 167, 169, 171, 184, 187, 195, 201, 205, 208, 210,
  9324. 217, 219, 222, 228, 232, 234, 247, 249, 253, 256, 267, 271, 273, 276, 282, 288,
  9325. 291, 297, 312, 322, 324, 336, 338, 342, 347, 353, 357, 359, 374, 379, 390, 393,
  9326. 395, 409, 426, 441, 448, 450, 452, 464, 466, 470, 475, 488, 492, 512, 513, 514,
  9327. 516, 520, 521, 523, 525, 527, 528, 530, 537, 540, 542, 556, 558, 561, 570, 576,
  9328. 577, 579, 582, 584, 588, 593, 600, 603, 609, 616, 618, 632, 638, 640, 650, 653,
  9329. 655, 656, 660, 666, 672, 675, 685, 688, 698, 705, 708, 711, 712, 715, 721, 727,
  9330. 728, 732, 737, 754, 760, 771, 773, 778, 780, 793, 795, 802, 806, 808, 812, 833,
  9331. 840, 843, 849, 856, 858, 873, 912, 916, 919, 932, 934, 961, 963, 968, 970, 977,
  9332. 989, 993, 1010, 1016, 1024, 1025, 1027, 1029, 1031, 1032, 1034, 1036, 1038, 1041, 1043, 1047,
  9333. 1048, 1050, 1057, 1059, 1061, 1064, 1066, 1079, 1080, 1083, 1085, 1088, 1090, 1096, 1099, 1103,
  9334. 1106, 1109, 1113, 1116, 1122, 1129, 1153, 1156, 1159, 1169, 1171, 1176, 1183, 1185, 1195, 1199,
  9335. 1209, 1212, 1216, 1218, 1221, 1225, 1234, 1236, 1241, 1243, 1250, 1256, 1270, 1281, 1287, 1296,
  9336. 1299, 1306, 1309, 1313, 1338, 1341, 1348, 1353, 1362, 1375, 1376, 1387, 1400, 1408, 1410, 1415,
  9337. 1425, 1453, 1457, 1477, 1481, 1494, 1496, 1507, 1512, 1538, 1545, 1547, 1549, 1551, 1554, 1561,
  9338. 1563, 1565, 1570, 1572, 1575, 1577, 1587, 1593, 1601, 1603, 1605, 1612, 1617, 1619, 1632, 1648,
  9339. 1658, 1662, 1664, 1674, 1680, 1690, 1692, 1704, 1729, 1736, 1740, 1745, 1747, 1751, 1752, 1761,
  9340. 1763, 1767, 1773, 1787, 1795, 1801, 1806, 1810, 1817, 1834, 1840, 1844, 1857, 1864, 1866, 1877,
  9341. 1882, 1892, 1902, 1915, 1934, 1953, 1985, 1987, 2000, 2002, 2013, 2048, 2052, 2058, 2064, 2068,
  9342. 2071, 2074, 2081, 2088, 2104, 2114, 2119, 2121, 2123, 2130, 2136, 2141, 2147, 2153, 2157, 2177,
  9343. 2179, 2184, 2189, 2193, 2203, 2208, 2223, 2226, 2232, 2244, 2249, 2251, 2256, 2258, 2265, 2269,
  9344. 2304, 2306, 2324, 2335, 2336, 2361, 2373, 2375, 2385, 2418, 2443, 2460, 2480, 2504, 2509, 2520,
  9345. 2531, 2537, 2562, 2568, 2572, 2578, 2592, 2596, 2599, 2602, 2614, 2620, 2625, 2627, 2629, 2634,
  9346. 2641, 2650, 2682, 2688, 2697, 2707, 2712, 2718, 2731, 2754, 2759, 2760, 2775, 2788, 2793, 2805,
  9347. 2811, 2817, 2820, 2832, 2842, 2854, 2890, 2902, 2921, 2923, 2978, 3010, 3012, 3026, 3081, 3083,
  9348. 3085, 3097, 3099, 3120, 3136, 3152, 3159, 3188, 3210, 3228, 3234, 3245, 3250, 3256, 3264, 3276,
  9349. 3281, 3296, 3349, 3363, 3378, 3392, 3395, 3420, 3440, 3461, 3488, 3529, 3531, 3584, 3588, 3591,
  9350. 3600, 3602, 3614, 3616, 3628, 3634, 3650, 3657, 3668, 3683, 3685, 3713, 3716, 3720, 3726, 3729,
  9351. 3736, 3753, 3778, 3802, 3805, 3819, 3841, 3845, 3851, 3856, 3880, 3922, 3938, 3970, 3993, 4032,
  9352. };
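// With 4 coordinates of 3 bits each, every possible packed index fits in 12 bits, so the
// inverse map covers the full 4096 entries here.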
  9353. const int kmap_size = 4096;
  9354. const int nwant = grid_size == 256 ? 2 : 3;
  9355. const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512;
  9356. uint32_t * kgrid_q3xs;
  9357. int * kmap_q3xs;
  9358. uint16_t * kneighbors_q3xs;
  9359. printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
  9360. uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
  9361. for (int k = 0; k < grid_size; ++k) {
  9362. int8_t * pos = (int8_t *)(the_grid + k);
  9363. for (int i = 0; i < 4; ++i) {
  9364. int l = (kgrid[k] >> 3*i) & 0x7;
  9365. pos[i] = 2*l + 1;
  9366. }
  9367. }
  9368. kgrid_q3xs = the_grid;
  9369. iq3_data[gindex].grid = the_grid;
  9370. kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
  9371. iq3_data[gindex].map = kmap_q3xs;
  9372. for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
  9373. uint32_t aux32;
  9374. uint8_t * aux8 = (uint8_t *)&aux32;
  9375. for (int i = 0; i < grid_size; ++i) {
  9376. aux32 = kgrid_q3xs[i];
  9377. uint16_t index = 0;
  9378. for (int k=0; k<4; ++k) {
  9379. uint16_t q = (aux8[k] - 1)/2;
  9380. index |= (q << 3*k);
  9381. }
  9382. kmap_q3xs[index] = i;
  9383. }
  9384. int8_t pos[4];
  9385. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  9386. int num_neighbors = 0, num_not_in_map = 0;
  9387. for (int i = 0; i < kmap_size; ++i) {
  9388. if (kmap_q3xs[i] >= 0) continue;
  9389. ++num_not_in_map;
  9390. for (int k = 0; k < 4; ++k) {
  9391. int l = (i >> 3*k) & 0x7;
  9392. pos[k] = 2*l + 1;
  9393. }
  9394. for (int j = 0; j < grid_size; ++j) {
  9395. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  9396. int d2 = 0;
  9397. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  9398. dist2[2*j+0] = d2;
  9399. dist2[2*j+1] = j;
  9400. }
  9401. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  9402. int n = 0; int d2 = dist2[0];
  9403. int nhave = 1;
  9404. for (int j = 0; j < grid_size; ++j) {
  9405. if (dist2[2*j] > d2) {
  9406. if (nhave == nwant) break;
  9407. d2 = dist2[2*j];
  9408. ++nhave;
  9409. }
  9410. ++n;
  9411. }
  9412. num_neighbors += n;
  9413. }
  9414. printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  9415. kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  9416. iq3_data[gindex].neighbours = kneighbors_q3xs;
  9417. int counter = 0;
  9418. for (int i = 0; i < kmap_size; ++i) {
  9419. if (kmap_q3xs[i] >= 0) continue;
  9420. for (int k = 0; k < 4; ++k) {
  9421. int l = (i >> 3*k) & 0x7;
  9422. pos[k] = 2*l + 1;
  9423. }
  9424. for (int j = 0; j < grid_size; ++j) {
  9425. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  9426. int d2 = 0;
  9427. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  9428. dist2[2*j+0] = d2;
  9429. dist2[2*j+1] = j;
  9430. }
  9431. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  9432. kmap_q3xs[i] = -(counter + 1);
  9433. int d2 = dist2[0];
  9434. uint16_t * start = &kneighbors_q3xs[counter++];
  9435. int n = 0, nhave = 1;
  9436. for (int j = 0; j < grid_size; ++j) {
  9437. if (dist2[2*j] > d2) {
  9438. if (nhave == nwant) break;
  9439. d2 = dist2[2*j];
  9440. ++nhave;
  9441. }
  9442. kneighbors_q3xs[counter++] = dist2[2*j+1];
  9443. ++n;
  9444. }
  9445. *start = n;
  9446. }
  9447. free(dist2);
  9448. }
  9449. void iq3xs_free_impl(int grid_size) {
  9450. GGML_ASSERT(grid_size == 256 || grid_size == 512);
  9451. const int gindex = iq3_data_index(grid_size);
  9452. if (iq3_data[gindex].grid) {
  9453. free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL;
  9454. free(iq3_data[gindex].map); iq3_data[gindex].map = NULL;
  9455. free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
  9456. }
  9457. }
  9458. static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
  9459. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  9460. int num_neighbors = neighbours[0];
  9461. GGML_ASSERT(num_neighbors > 0);
  9462. float best_d2 = FLT_MAX;
  9463. int grid_index = -1;
  9464. for (int j = 1; j <= num_neighbors; ++j) {
  9465. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9466. float d2 = 0;
  9467. for (int i = 0; i < 4; ++i) {
  9468. float q = pg[i];
  9469. float diff = scale*q - xval[i];
  9470. d2 += weight[i]*diff*diff;
  9471. }
  9472. if (d2 < best_d2) {
  9473. best_d2 = d2; grid_index = neighbours[j];
  9474. }
  9475. }
  9476. GGML_ASSERT(grid_index >= 0);
  9477. const int8_t * pg = (const int8_t *)(grid + grid_index);
  9478. for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
  9479. return grid_index;
  9480. }
  9481. static void quantize_row_iq3_xxs_impl(int grid_size, const float * restrict x, void * restrict vy, int n,
  9482. const float * restrict quant_weights) {
  9483. const int gindex = iq3_data_index(grid_size);
  9484. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  9485. const int * kmap_q3xs = iq3_data[gindex].map;
  9486. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  9487. //GGML_ASSERT(quant_weights && "missing quantization weights");
  9488. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  9489. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  9490. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  9491. GGML_ASSERT(n%QK_K == 0);
  9492. const int kMaxQ = 8;
  9493. const int nbl = n/QK_K;
  9494. ggml_fp16_t * dh;
  9495. uint8_t * qs;
  9496. int block_size;
  9497. if (grid_size == 256) {
  9498. block_iq3_xxs * y = vy;
  9499. dh = &y->d;
  9500. qs = y->qs;
  9501. block_size = sizeof(block_iq3_xxs);
  9502. } else {
  9503. block_iq3_s * y = vy;
  9504. dh = &y->d;
  9505. qs = y->qs;
  9506. block_size = sizeof(block_iq3_s);
  9507. }
  9508. int quant_size = block_size - sizeof(ggml_fp16_t);
  9509. float scales[QK_K/32];
  9510. float weight[32];
  9511. float xval[32];
  9512. int8_t L[32];
  9513. int8_t Laux[32];
  9514. float waux[32];
  9515. bool is_on_grid[8];
  9516. bool is_on_grid_aux[8];
  9517. uint8_t block_signs[8];
  9518. uint8_t q3[3*(QK_K/8)+QK_K/32];
  9519. uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);
  9520. uint8_t * qh = q3 + 3*(QK_K/8);
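// Scratch layout of q3 for one super-block (with QK_K = 256): the first QK_K/4 bytes hold
// the low 8 bits of the grid indices (one byte per group of 4 values, 8 groups per 32-value
// block), followed by QK_K/32 uint32_t words each combining four 7-bit sign masks with a
// 4-bit block scale, and finally QK_K/32 bytes (qh) holding the 9th index bits, which are
// only used for the 512-entry grid.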
  9521. for (int ibl = 0; ibl < nbl; ++ibl) {
  9522. dh[0] = GGML_FP32_TO_FP16(0.f);
  9523. memset(q3, 0, 3*QK_K/8+QK_K/32);
  9524. float max_scale = 0;
  9525. const float * xbl = x + QK_K*ibl;
  9526. float sumx2 = 0;
  9527. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  9528. float sigma2 = 2*sumx2/QK_K;
  9529. for (int ib = 0; ib < QK_K/32; ++ib) {
  9530. const float * xb = xbl + 32*ib;
  9531. if (quant_weights) {
  9532. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  9533. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  9534. } else {
  9535. for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
  9536. }
  9537. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
  9538. for (int k = 0; k < 4; ++k) {
  9539. int nflip = 0;
  9540. uint8_t s = 0;
  9541. for (int i = 0; i < 8; ++i) {
  9542. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  9543. else {
  9544. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  9545. }
  9546. }
  9547. if (nflip%2) {
  9548. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  9549. for (int i = 1; i < 8; ++i) {
  9550. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  9551. if (ax < min) {
  9552. min = ax; imin = i;
  9553. }
  9554. }
  9555. xval[8*k+imin] = -xval[8*k+imin];
  9556. s ^= (1 << imin);
  9557. }
  9558. block_signs[k] = s & 127;
  9559. }
  9560. float max = xval[0];
  9561. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  9562. if (!max) {
  9563. scales[ib] = 0;
  9564. memset(L, 0, 32);
  9565. continue;
  9566. }
  9567. float best = 0;
  9568. float scale = max/(2*kMaxQ-1);
  9569. for (int is = -15; is <= 15; ++is) {
  9570. float id = (2*kMaxQ-1+is*0.2f)/max;
  9571. float this_scale = 1/id;
  9572. for (int k = 0; k < 8; ++k) {
  9573. for (int i = 0; i < 4; ++i) {
  9574. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  9575. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  9576. }
  9577. uint16_t u = 0;
  9578. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  9579. int grid_index = kmap_q3xs[u];
  9580. is_on_grid_aux[k] = true;
  9581. if (grid_index < 0) {
  9582. is_on_grid_aux[k] = false;
  9583. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  9584. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  9585. }
  9586. }
  9587. float sumqx = 0, sumq2 = 0;
  9588. for (int i = 0; i < 32; ++i) {
  9589. float w = weight[i];
  9590. float q = 2*Laux[i] + 1;
  9591. sumqx += w*xval[i]*q;
  9592. sumq2 += w*q*q;
  9593. }
  9594. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  9595. scale = sumqx/sumq2; best = scale*sumqx;
  9596. for (int i = 0; i < 32; ++i) L[i] = Laux[i];
  9597. for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
  9598. }
  9599. }
  9600. int n_not_ongrid = 0;
  9601. for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  9602. if (n_not_ongrid > 0 && scale > 0) {
  9603. float id = 1/scale;
  9604. for (int k = 0; k < 8; ++k) {
  9605. if (is_on_grid[k]) continue;
  9606. uint16_t u = 0;
  9607. for (int i = 0; i < 4; ++i) {
  9608. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  9609. l = MAX(0, MIN(kMaxQ-1, l));
  9610. u |= (l << 3*i);
  9611. }
  9612. int grid_index = kmap_q3xs[u];
  9613. if (grid_index < 0) {
  9614. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  9615. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  9616. }
  9617. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  9618. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  9619. }
  9620. float sumqx = 0, sumq2 = 0;
  9621. for (int i = 0; i < 32; ++i) {
  9622. float w = weight[i];
  9623. float q = 2*L[i] + 1;
  9624. sumqx += w*xval[i]*q;
  9625. sumq2 += w*q*q;
  9626. }
  9627. if (sumq2 > 0) scale = sumqx/sumq2;
  9628. }
  9629. if (scale < 0) {
9630. // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale)
  9631. // and correspondingly flip quant signs.
  9632. scale = -scale;
  9633. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  9634. }
  9635. for (int k = 0; k < 8; ++k) {
  9636. uint16_t u = 0;
  9637. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  9638. int grid_index = kmap_q3xs[u];
  9639. if (grid_index < 0) {
  9640. printf("Oops: found point %u not on grid:", u);
  9641. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  9642. printf("\n");
  9643. GGML_ASSERT(false);
  9644. }
  9645. if (grid_size == 256) {
  9646. q3[8*ib+k] = grid_index;
  9647. } else {
  9648. q3[8*ib+k] = grid_index & 255;
  9649. qh[ib] |= ((grid_index >> 8) << k);
  9650. }
  9651. }
  9652. scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
  9653. GGML_ASSERT(scale >= 0);
  9654. scales[ib] = scale;
  9655. max_scale = MAX(max_scale, scale);
  9656. }
  9657. if (!max_scale) {
  9658. memset(qs, 0, quant_size);
  9659. dh += block_size/sizeof(ggml_fp16_t);
  9660. qs += block_size;
  9661. continue;
  9662. }
  9663. float d = max_scale/31;
  9664. dh[0] = GGML_FP32_TO_FP16(d * 1.0125f); // small improvement via this fudge factor
  9665. float id = 1/d;
  9666. for (int ib = 0; ib < QK_K/32; ++ib) {
  9667. int l = nearest_int(0.5f*(id*scales[ib]-1));
  9668. l = MAX(0, MIN(15, l));
  9669. scales_and_signs[ib] |= ((uint32_t)l << 28);
  9670. }
  9671. memcpy(qs, q3, quant_size);
  9672. dh += block_size/sizeof(ggml_fp16_t);
  9673. qs += block_size;
  9674. }
  9675. }
  9676. size_t quantize_iq3_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  9677. (void)hist;
  9678. GGML_ASSERT(n_per_row%QK_K == 0);
  9679. int nblock = n_per_row/QK_K;
  9680. char * qrow = (char *)dst;
  9681. for (int row = 0; row < nrow; ++row) {
  9682. quantize_row_iq3_xxs_impl(256, src, qrow, n_per_row, quant_weights);
  9683. src += n_per_row;
  9684. qrow += nblock*sizeof(block_iq3_xxs);
  9685. }
  9686. return nrow * nblock * sizeof(block_iq3_xxs);
  9687. }
  9688. void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int k) {
  9689. assert(k % QK_K == 0);
  9690. block_iq3_xxs * restrict y = vy;
  9691. quantize_row_iq3_xxs_reference(x, y, k);
  9692. }
  9693. void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int k) {
  9694. assert(k % QK_K == 0);
  9695. quantize_row_iq3_xxs_impl(256, x, y, k, NULL);
  9696. }
  9697. static void quantize_row_iq3_s_impl(int block_size, const float * restrict x, void * restrict vy, int n,
  9698. const float * restrict quant_weights,
  9699. float * scales,
  9700. float * weight,
  9701. float * xval,
  9702. int8_t * L,
  9703. int8_t * Laux,
  9704. float * waux,
  9705. bool * is_on_grid,
  9706. bool * is_on_grid_aux,
  9707. uint8_t * block_signs) {
  9708. const int gindex = iq3_data_index(512);
  9709. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  9710. const int * kmap_q3xs = iq3_data[gindex].map;
  9711. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  9712. //GGML_ASSERT(quant_weights && "missing quantization weights");
  9713. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  9714. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  9715. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  9716. GGML_ASSERT(n%QK_K == 0);
  9717. const int kMaxQ = 8;
  9718. const int nbl = n/QK_K;
  9719. block_iq3_s * y = vy;
  9720. const int bs4 = block_size/4;
  9721. const int bs8 = block_size/8;
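// The scratch buffers (scales, weight, ..., block_signs) are provided by the caller, sized
// for the chosen block_size. Output layout per super-block: qs holds the low 8 bits of each
// group's grid index, qh the 9th bits, and signs full 8-bit sign masks (unlike IQ3_XXS,
// which stores only 7 bits per group).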
  9722. for (int ibl = 0; ibl < nbl; ++ibl) {
  9723. memset(&y[ibl], 0, sizeof(block_iq3_s));
  9724. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  9725. uint8_t * qs = y[ibl].qs;
  9726. uint8_t * qh = y[ibl].qh;
  9727. uint8_t * signs = y[ibl].signs;
  9728. float max_scale = 0;
  9729. const float * xbl = x + QK_K*ibl;
  9730. float sumx2 = 0;
  9731. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  9732. float sigma2 = 2*sumx2/QK_K;
  9733. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  9734. const float * xb = xbl + block_size*ib;
  9735. if (quant_weights) {
  9736. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  9737. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  9738. } else {
  9739. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  9740. }
  9741. for (int i = 0; i < block_size; ++i) waux[i] = sqrtf(weight[i]);
  9742. for (int k = 0; k < bs8; ++k) {
  9743. uint8_t s = 0;
  9744. for (int i = 0; i < 8; ++i) {
  9745. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  9746. else {
  9747. xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
  9748. }
  9749. }
  9750. block_signs[k] = s;
  9751. }
  9752. float max = xval[0];
  9753. for (int i = 1; i < block_size; ++i) max = MAX(max, xval[i]);
  9754. if (!max) {
  9755. scales[ib] = 0;
  9756. continue;
  9757. }
  9758. float best = 0;
  9759. float scale = max/(2*kMaxQ-1);
  9760. for (int is = -15; is <= 15; ++is) {
  9761. float id = (2*kMaxQ-1+is*0.2f)/max;
  9762. float this_scale = 1/id;
  9763. for (int k = 0; k < bs4; ++k) {
  9764. for (int i = 0; i < 4; ++i) {
  9765. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  9766. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  9767. }
  9768. uint16_t u = 0;
  9769. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  9770. int grid_index = kmap_q3xs[u];
  9771. is_on_grid_aux[k] = true;
  9772. if (grid_index < 0) {
  9773. is_on_grid_aux[k] = false;
  9774. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  9775. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  9776. }
  9777. }
  9778. float sumqx = 0, sumq2 = 0;
  9779. for (int i = 0; i < block_size; ++i) {
  9780. float w = weight[i];
  9781. float q = 2*Laux[i] + 1;
  9782. sumqx += w*xval[i]*q;
  9783. sumq2 += w*q*q;
  9784. }
  9785. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  9786. scale = sumqx/sumq2; best = scale*sumqx;
  9787. for (int i = 0; i < block_size; ++i) L[i] = Laux[i];
  9788. for (int k = 0; k < bs4; ++k) is_on_grid[k] = is_on_grid_aux[k];
  9789. }
  9790. }
  9791. int n_not_ongrid = 0;
  9792. for (int k = 0; k < bs4; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  9793. if (n_not_ongrid > 0 && scale > 0) {
  9794. float id = 1/scale;
  9795. for (int k = 0; k < bs4; ++k) {
  9796. if (is_on_grid[k]) continue;
  9797. uint16_t u = 0;
  9798. for (int i = 0; i < 4; ++i) {
  9799. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  9800. l = MAX(0, MIN(kMaxQ-1, l));
  9801. u |= (l << 3*i);
  9802. }
  9803. int grid_index = kmap_q3xs[u];
  9804. if (grid_index < 0) {
  9805. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  9806. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  9807. }
  9808. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  9809. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  9810. }
  9811. float sumqx = 0, sumq2 = 0;
  9812. for (int i = 0; i < block_size; ++i) {
  9813. float w = weight[i];
  9814. float q = 2*L[i] + 1;
  9815. sumqx += w*xval[i]*q;
  9816. sumq2 += w*q*q;
  9817. }
  9818. if (sumq2 > 0) scale = sumqx/sumq2;
  9819. }
  9820. if (scale < 0) {
9821. // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale)
  9822. // and correspondingly flip quant signs.
  9823. scale = -scale;
  9824. for (int k = 0; k < bs8; ++k) block_signs[k] = ~block_signs[k];
  9825. }
  9826. for (int k = 0; k < bs4; ++k) {
  9827. uint16_t u = 0;
  9828. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  9829. int grid_index = kmap_q3xs[u];
  9830. if (grid_index < 0) {
  9831. printf("Oops: found point %u not on grid:", u);
  9832. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  9833. printf("\n");
  9834. GGML_ASSERT(false);
  9835. }
  9836. qs[k] = grid_index & 255;
  9837. qh[(ib*bs4+k)/8] |= ((grid_index >> 8) << ((ib*bs4+k)%8));
  9838. }
  9839. qs += bs4;
  9840. for (int k = 0; k < bs8; ++k) signs[k] = block_signs[k];
  9841. signs += bs8;
  9842. GGML_ASSERT(scale >= 0);
  9843. scales[ib] = scale;
  9844. max_scale = MAX(max_scale, scale);
  9845. }
  9846. if (!max_scale) {
  9847. continue;
  9848. }
  9849. float d = max_scale/31;
  9850. y[ibl].d = GGML_FP32_TO_FP16(d);
  9851. float id = 1/d;
  9852. for (int ib = 0; ib < QK_K/block_size; ib += 2) {
  9853. int l1 = nearest_int(0.5f*(id*scales[ib+0]-1));
  9854. l1 = MAX(0, MIN(15, l1));
  9855. int l2 = nearest_int(0.5f*(id*scales[ib+1]-1));
  9856. l2 = MAX(0, MIN(15, l2));
  9857. y[ibl].scales[ib/2] = l1 | (l2 << 4);
  9858. }
  9859. }
  9860. }
  9861. #define IQ3S_BLOCK_SIZE 32
  9862. size_t quantize_iq3_s(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
  9863. (void)hist;
  9864. GGML_ASSERT(n_per_row%QK_K == 0);
  9865. int nblock = n_per_row/QK_K;
  9866. float scales[QK_K/IQ3S_BLOCK_SIZE];
  9867. float weight[IQ3S_BLOCK_SIZE];
  9868. float xval[IQ3S_BLOCK_SIZE];
  9869. int8_t L[IQ3S_BLOCK_SIZE];
  9870. int8_t Laux[IQ3S_BLOCK_SIZE];
  9871. float waux[IQ3S_BLOCK_SIZE];
  9872. bool is_on_grid[IQ3S_BLOCK_SIZE/4];
  9873. bool is_on_grid_aux[IQ3S_BLOCK_SIZE/4];
  9874. uint8_t block_signs[IQ3S_BLOCK_SIZE/8];
  9875. char * qrow = (char *)dst;
  9876. for (int row = 0; row < nrow; ++row) {
  9877. quantize_row_iq3_s_impl(IQ3S_BLOCK_SIZE, src, qrow, n_per_row, quant_weights,
  9878. scales, weight, xval, L, Laux, waux, is_on_grid, is_on_grid_aux, block_signs);
  9879. src += n_per_row;
  9880. qrow += nblock*sizeof(block_iq3_s);
  9881. }
  9882. return nrow * nblock * sizeof(block_iq3_s);
  9883. }
  9884. void quantize_row_iq3_s(const float * restrict x, void * restrict vy, int k) {
  9885. assert(k % QK_K == 0);
  9886. block_iq3_s * restrict y = vy;
  9887. quantize_row_iq3_s_reference(x, y, k);
  9888. }
  9889. void quantize_row_iq3_s_reference(const float * restrict x, block_iq3_s * restrict y, int k) {
  9890. assert(k % QK_K == 0);
  9891. quantize_iq3_s(x, y, 1, k, NULL, NULL);
  9892. }
  9893. // =================================== 1.5 bpw ===================================================
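// Unlike the iq2/iq3 neighbour searches above, which keep the scale fixed, this one
// re-optimizes the scale for every candidate and maximizes sumqx^2/sumq2. If none of the
// listed neighbours gives a positive correlation with the data, the whole grid is scanned
// as a fallback.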
  9894. static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  9895. const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) {
  9896. int num_neighbors = neighbours[0];
  9897. GGML_ASSERT(num_neighbors > 0);
  9898. float best_score = 0;
  9899. int grid_index = -1;
  9900. for (int j = 1; j <= num_neighbors; ++j) {
  9901. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9902. float sumqx = 0, sumq2 = 0;
  9903. for (int i = 0; i < 8; ++i) {
  9904. float q = (pg[i] - 3)/2;
  9905. float w = weight[i];
  9906. sumqx += w*q*xval[i];
  9907. sumq2 += w*q*q;
  9908. }
  9909. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  9910. *scale = sumqx/sumq2; best_score = *scale * sumqx;
  9911. grid_index = neighbours[j];
  9912. }
  9913. }
  9914. if (grid_index < 0) {
  9915. for (int i = 0; i < ngrid; ++i) {
  9916. const int8_t * grid_i = (const int8_t *)(grid + i);
  9917. float sumqx = 0, sumq2 = 0;
  9918. for (int j = 0; j < 8; ++j) {
  9919. float w = weight[j];
  9920. float q = (grid_i[j] - 3)/2;
  9921. sumqx += w*q*xval[j];
  9922. sumq2 += w*q*q;
  9923. }
  9924. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  9925. *scale = sumqx/sumq2; best_score = *scale*sumqx;
  9926. grid_index = i;
  9927. }
  9928. }
  9929. }
  9930. if (grid_index < 0) {
  9931. printf("Oops, did not find grid point\n");
  9932. printf("Have %d neighbours\n", num_neighbors);
  9933. for (int j = 1; j <= num_neighbors; ++j) {
  9934. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9935. float sumqx = 0, sumq2 = 0;
  9936. for (int i = 0; i < 8; ++i) {
  9937. float q = (pg[i] - 3)/2;
  9938. float w = weight[i];
  9939. sumqx += w*q*xval[i];
  9940. sumq2 += w*q*q;
  9941. }
  9942. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  9943. }
  9944. }
  9945. GGML_ASSERT(grid_index >= 0);
  9946. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  9947. *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result.
  9948. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  9949. const int8_t * pg = (const int8_t *)(grid + grid_index);
  9950. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  9951. return grid_index;
  9952. }
static int iq1_sort_helper(const void * left, const void * right) {
    const float * l = left;
    const float * r = right;
    return *l < *r ? -1 : *l > *r ? 1 : 0;
}
static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
    const int gindex = iq2_data_index(GGML_TYPE_IQ1_S);
    const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
    const int * kmap_q2xs = iq2_data[gindex].map;
    const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
    GGML_ASSERT(quant_weights && "missing quantization weights");
    GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);
    const int nbl = n/QK_K;
    block_iq1_s * y = vy;
    float scales[QK_K/8];
    float weight[8];
    int8_t L[8];
    float sumx[9];
    float sumw[9];
    float pairs[16];
    int * idx = (int *)(pairs + 1);
    uint8_t hbit[QK_K/8];
    for (int ibl = 0; ibl < nbl; ++ibl) {
        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        memset(y[ibl].qs, 0, QK_K/8);
        memset(y[ibl].scales, 0, QK_K/16);
        float max_scale = 0;
        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = sumx2/QK_K;
        for (int ib = 0; ib < QK_K/8; ++ib) {
            const float * xb = xbl + 8*ib;
            const float * qw = quant_weights + QK_K*ibl + 8*ib;
            for (int i = 0; i < 8; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            float max = fabsf(xb[0]);
            for (int i = 1; i < 8; ++i) max = MAX(max, fabsf(xb[i]));
            if (!max) {
                scales[ib] = 0;
                memset(L, 1, 8);
                continue;
            }
            // Here we solve exactly the weighted sum of squared differences (SSD) minimization problem.
            // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
            // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
            // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and
            // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale
            // and the corresponding score for each possible split.
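            // For a fixed assignment q_i in {-1, 0, 1}, the weighted SSD sum_i w_i*(x_i - d*q_i)^2 is
            // minimized by d = sum_i w_i*q_i*x_i / sum_i w_i*q_i^2, and the error reduction it provides is
            // (sum_i w_i*q_i*x_i)^2 / sum_i w_i*q_i^2. Expressed with the prefix sums S_i and W_i this is
            // sumqx*sumqx/sumq2, which is the score maximized over all splits (i1, i2) below.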
            for (int j = 0; j < 8; ++j) {
                pairs[2*j] = xb[j];
                idx[2*j] = j;
            }
            qsort(pairs, 8, 2*sizeof(float), iq1_sort_helper);
            {
                sumx[0] = sumw[0] = 0;
                for (int j = 0; j < 8; ++j) {
                    int i = idx[2*j];
                    sumx[j+1] = sumx[j] + weight[i]*xb[i];
                    sumw[j+1] = sumw[j] + weight[i];
                }
            }
            float best_score = 0, scale = max;
            int besti1 = 0, besti2 = 0;
            for (int i1 = 0; i1 <= 8; ++i1) {
                for (int i2 = i1; i2 <= 8; ++i2) {
                    float sumqx = -(sumx[i1] - sumx[0]) + (sumx[8] - sumx[i2]);
                    float sumq2 = (sumw[i1] - sumw[0]) + (sumw[8] - sumw[i2]);
                    if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
                        scale = sumqx/sumq2; best_score = scale*sumqx;
                        besti1 = i1; besti2 = i2;
                    }
                }
            }
            for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
            for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
            for (int j = besti2; j < 8; ++j) L[idx[2*j]] = 2;
            if (scale < 0) {
                for (int j = 0; j < 8; ++j) L[j] = 2 - L[j];
                scale = -scale;
            }
            // Now we check if the solution found above corresponds to a grid point and, if not, use a neighbouring
            // grid point that minimizes SSD.
            uint16_t u = 0;
            for (int j = 0; j < 8; ++j) u |= (L[j] << 2*j);
            int grid_index = kmap_q2xs[u];
            if (grid_index < 0) {
                const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                grid_index = iq1_find_best_neighbour(neighbours, kgrid_q2xs, xb, weight, &scale, L, NGRID_IQ2XXS);
                GGML_ASSERT(grid_index >= 0);
            }
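            // The selected grid index needs 9 bits: the low 8 bits go into qs[ib], and the 9th bit is kept
            // in hbit[ib] so it can be folded into bit 3 of the 4-bit block scale further below.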
            y[ibl].qs[ib] = grid_index & 255;
            hbit[ib] = grid_index >> 8;
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }
        if (!max_scale) {
            memset(y[ibl].qs, 0, QK_K/8);
            continue;
        }
        float d = max_scale/15;
        y[ibl].d = GGML_FP32_TO_FP16(d*1.085f); // 1.085f is another fudge factor. Don't ask me why it is needed.
        float id = 1/d;
        for (int ib = 0; ib < QK_K/8; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(7, l));
            if (hbit[ib]) l |= 8;
            y[ibl].scales[ib/2] |= (l << 4*(ib%2));
        }
    }
}
size_t quantize_iq1_s(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq1_s);
    }
    return nrow * nblock * sizeof(block_iq1_s);
}
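// Example (sketch with hypothetical buffers): quantizing a single QK_K-sized row. IQ1_S needs both the
// precomputed grid tables and an importance matrix, so a minimal call would look like
//
//     ggml_quantize_init(GGML_TYPE_IQ1_S);   // builds the grid/map/neighbour tables checked above
//     float src[QK_K], imatrix[QK_K];        // filled by the caller
//     block_iq1_s dst[1];
//     quantize_iq1_s(src, dst, /*nrow=*/1, /*n_per_row=*/QK_K, /*hist=*/NULL, imatrix);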
// ============================ 4-bit non-linear quants
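// Binary search in the sorted table val[0..n-1]: returns the index of the entry closest to x.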
static inline int best_index_int8(int n, const int8_t * val, float x) {
    if (x <= val[0]) return 0;
    if (x >= val[n-1]) return n-1;
    int ml = 0, mu = n-1;
    while (mu-ml > 1) {
        int mav = (ml+mu)/2;
        if (x < val[mav]) mu = mav; else ml = mav;
    }
    return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
}
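// Quantizes super_block_size values (in blocks of block_size) onto the non-linear 4-bit grid in values[].
// Each block scale is seeded with -max/values[0] and refined over 2*ntry+1 candidate scales, keeping the
// one that maximizes (sum w*q*x)^2 / (sum w*q*q). When there is more than one block per super block, the
// block scales are stored as 6-bit integers split across scales_l (low 4 bits) and scales_h (high 2 bits)
// relative to the fp16 super-block scale -max_scale/32; otherwise the single scale goes directly into dh[0].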
static void quantize_row_iq4_nl_impl(const int super_block_size, const int block_size, const float * GGML_RESTRICT x,
        ggml_fp16_t * dh, uint8_t * q4, uint16_t * scales_h, uint8_t * scales_l,
        float * scales, float * weight, uint8_t * L,
        const int8_t * values,
        const float * quant_weights) {
    const int ntry = 7;
    float sigma2 = 0;
    for (int j = 0; j < super_block_size; ++j) sigma2 += x[j]*x[j];
    sigma2 *= 2.f/super_block_size;
    memset(q4, 0, super_block_size/2);
    dh[0] = GGML_FP32_TO_FP16(0.f);
    float max_scale = 0, amax_scale = 0;
    for (int ib = 0; ib < super_block_size/block_size; ++ib) {
        const float * xb = x + ib*block_size;
        if (quant_weights) {
            const float * qw = quant_weights + ib*block_size;
            for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
        } else {
            for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
        }
        float amax = 0, max = 0;
        for (int j = 0; j < block_size; ++j) {
            float ax = fabsf(xb[j]);
            if (ax > amax) {
                amax = ax; max = xb[j];
            }
        }
        if (!amax) {
            scales[ib] = 0;
            continue;
        }
        float d = -max/values[0];
        float id = 1/d;
        float sumqx = 0, sumq2 = 0;
        for (int j = 0; j < block_size; ++j) {
            float al = id*xb[j];
            int l = best_index_int8(16, values, al);
            float q = values[l];
            float w = weight[j];
            sumqx += w*q*xb[j];
            sumq2 += w*q*q;
        }
        d = sumqx/sumq2;
        float best = d*sumqx;
        for (int itry = -ntry; itry <= ntry; ++itry) {
            id = (itry + values[0])/max;
            sumqx = sumq2 = 0;
            for (int j = 0; j < block_size; ++j) {
                float al = id*xb[j];
                int l = best_index_int8(16, values, al);
                float q = values[l];
                float w = weight[j];
                sumqx += w*q*xb[j];
                sumq2 += w*q*q;
            }
            if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                d = sumqx/sumq2; best = d * sumqx;
            }
        }
        scales[ib] = d;
        float abs_d = fabsf(d);
        if (abs_d > amax_scale) {
            amax_scale = abs_d; max_scale = d;
        }
    }
    if (super_block_size/block_size > 1) {
        int nb = super_block_size/block_size;
        memset(scales_h, 0, ((nb+7)/8)*sizeof(uint16_t));
        float d = -max_scale/32;
        dh[0] = GGML_FP32_TO_FP16(d);
        float id = d ? 1/d : 0.f;
        for (int ib = 0; ib < super_block_size/block_size; ++ib) {
            int l = nearest_int(id*scales[ib]);
            l = MAX(-32, MIN(31, l));
            float dl = d * l;
            float idl = dl ? 1/dl : 0.f;
            uint8_t * Lb = L + ib*block_size;
            const float * xb = x + ib*block_size;
            for (int j = 0; j < block_size; ++j) {
                Lb[j] = best_index_int8(16, values, idl*xb[j]);
            }
            l += 32;
            uint8_t l_l = l & 0xf;
            uint8_t l_h = l >> 4;
            if (ib%2 == 0) scales_l[ib/2] = l_l;
            else scales_l[ib/2] |= (l_l << 4);
            scales_h[ib/8] |= (l_h << 2*(ib%8));
        }
    } else {
        dh[0] = GGML_FP32_TO_FP16(scales[0]);
        float id = scales[0] ? 1/scales[0] : 0;
        for (int j = 0; j < super_block_size; ++j) {
            L[j] = best_index_int8(16, values, id*x[j]);
        }
    }
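    // Pack two 4-bit indices per byte: within each group of 32 values, L[0..15] fill the low nibbles
    // and L[16..31] the high nibbles.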
    for (int i = 0; i < super_block_size/32; ++i) {
        for (int j = 0; j < 16; ++j) {
            q4[16*i + j] = L[32*i + j] | (L[32*i + 16 + j] << 4);
        }
    }
}
size_t quantize_iq4_nl(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    GGML_ASSERT(n_per_row%QK4_NL == 0);
    int nblock = n_per_row/QK4_NL;
    char * qrow = (char *)dst;
    uint8_t L[QK4_NL];
    float weight[QK4_NL];
    uint16_t unused_h;
    uint8_t * unused_l = NULL;
    float scale;
    for (int row = 0; row < nrow; ++row) {
        block_iq4_nl * iq4 = (block_iq4_nl *)qrow;
        for (int ibl = 0; ibl < nblock; ++ibl) {
            const float * qw = quant_weights ? quant_weights + QK4_NL*ibl : NULL;
            quantize_row_iq4_nl_impl(QK4_NL, 32, src + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l,
                    &scale, weight, L, kvalues_iq4nl, qw);
        }
        src += n_per_row;
        qrow += nblock*sizeof(block_iq4_nl);
    }
    return nrow * nblock * sizeof(block_iq4_nl);
}
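// Example (sketch with hypothetical buffers): when no importance matrix is available, the convenience
// wrappers below can be used directly, e.g.
//
//     float        x[2*QK4_NL];              // input length must be a multiple of QK4_NL
//     block_iq4_nl y[2];
//     quantize_row_iq4_nl(x, y, 2*QK4_NL);   // same as quantize_iq4_nl(x, y, 1, 2*QK4_NL, NULL, NULL)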
void quantize_row_iq4_nl(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK4_NL == 0);
    block_iq4_nl * restrict y = vy;
    quantize_row_iq4_nl_reference(x, y, k);
}
void quantize_row_iq4_nl_reference(const float * restrict x, block_iq4_nl * restrict y, int k) {
    assert(k % QK4_NL == 0);
    quantize_iq4_nl(x, y, 1, k, NULL, NULL);
}
size_t quantize_iq4_xs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
#if QK_K == 64
    return quantize_iq4_nl(src, dst, nrow, n_per_row, hist, quant_weights);
#else
    (void)hist;
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    uint8_t L[QK_K];
    float weight[32];
    float scales[QK_K/32];
    for (int row = 0; row < nrow; ++row) {
        block_iq4_xs * iq4 = (block_iq4_xs *)qrow;
        for (int ibl = 0; ibl < nblock; ++ibl) {
            const float * qw = quant_weights ? quant_weights + QK_K*ibl : NULL;
            quantize_row_iq4_nl_impl(QK_K, 32, src + QK_K*ibl, &iq4[ibl].d, iq4[ibl].qs, &iq4[ibl].scales_h, iq4[ibl].scales_l,
                    scales, weight, L, kvalues_iq4nl, qw);
        }
        src += n_per_row;
        qrow += nblock*sizeof(block_iq4_xs);
    }
    return nrow * nblock * sizeof(block_iq4_xs);
#endif
}
void quantize_row_iq4_xs(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_iq4_xs * restrict y = vy;
    quantize_row_iq4_xs_reference(x, y, k);
}
void quantize_row_iq4_xs_reference(const float * restrict x, block_iq4_xs * restrict y, int k) {
    assert(k % QK_K == 0);
    quantize_iq4_xs(x, y, 1, k, NULL, NULL);
}
// =============================== 2.5625 bpw
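// Quantizes one row to IQ2_S: every QK_K block is processed in groups of 16 values, and each group as two
// halves of 8. Per half, the signs are collected into one byte and the magnitudes are quantized to
// q in {1, 3, 5} (q = 2*L + 1), constrained to the precomputed grid (falling back to the nearest grid
// neighbour when the rounded point is off-grid). Each group gets a 4-bit scale relative to the fp16
// super-block scale.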
static void quantize_row_iq2_s_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
    const int gindex = iq2_data_index(GGML_TYPE_IQ2_S);
    const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
    const int * kmap_q2xs = iq2_data[gindex].map;
    const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
    GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);
    const int kMaxQ = 3;
    const int nbl = n/QK_K;
    block_iq2_s * y = vy;
    float scales[QK_K/16];
    float weight[16];
    float xval[16];
    int8_t L[16];
    int8_t Laux[16];
    float waux[16];
    bool is_on_grid[2];
    bool is_on_grid_aux[2];
    uint8_t block_signs[2];
    for (int ibl = 0; ibl < nbl; ++ibl) {
        memset(&y[ibl], 0, sizeof(block_iq2_s));
        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        float max_scale = 0;
        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = 2*sumx2/QK_K;
        for (int ib = 0; ib < QK_K/16; ++ib) {
            const float * xb = xbl + 16*ib;
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*ibl + 16*ib;
                for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            } else {
                for (int i = 0; i < 16; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i];
            }
            for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 2; ++k) {
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
                    }
                }
                block_signs[k] = s;
            }
            float max = xval[0];
            for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                continue;
            }
            float best = 0;
            float scale = max/(2*kMaxQ-1);
            is_on_grid[0] = is_on_grid[1] = true;
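            // Grid search over 19 candidate scales around max/(2*kMaxQ-1): each candidate snaps both
            // 8-value halves to the 2-bit grid (falling back to the nearest grid neighbour when off-grid)
            // and the candidate maximizing (sum w*q*x)^2 / (sum w*q*q) is kept.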
            for (int is = -9; is <= 9; ++is) {
                float id = (2*kMaxQ-1+is*0.1f)/max;
                float this_scale = 1/id;
                for (int k = 0; k < 2; ++k) {
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
                    int grid_index = kmap_q2xs[u];
                    is_on_grid_aux[k] = true;
                    if (grid_index < 0) {
                        is_on_grid_aux[k] = false;
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 16; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    for (int i = 0; i < 16; ++i) L[i] = Laux[i];
                    for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
                }
            }
            int n_not_ongrid = 0;
            for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
            if (n_not_ongrid > 0 && scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 2; ++k) {
                    if (is_on_grid[k]) continue;
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 2*i);
                        L[8*k + i] = l;
                    }
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 16; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                scale = -scale;
                for (int k = 0; k < 2; ++k) block_signs[k] = ~block_signs[k];
            }
            for (int k = 0; k < 2; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
                int grid_index = kmap_q2xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                const int i8 = 2*ib + k;
                y[ibl].qs[i8] = grid_index & 255;
                y[ibl].qh[i8/4] |= ((grid_index >> 8) << 2*(i8%4));
                y[ibl].qs[QK_K/8 + i8] = block_signs[k];
            }
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }
        if (!max_scale) {
            continue;
        }
        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d * 0.9875f);
        float id = 1/d;
        for (int ib = 0; ib < QK_K/16; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            if (ib%2 == 0) y[ibl].scales[ib/2] = l;
            else y[ibl].scales[ib/2] |= (l << 4);
        }
    }
}
size_t quantize_iq2_s(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) {
    (void)hist;
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq2_s_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq2_s);
    }
    return nrow * nblock * sizeof(block_iq2_s);
}
void quantize_row_iq2_s_reference(const float * restrict x, block_iq2_s * restrict y, int k) {
    assert(k % QK_K == 0);
    quantize_iq2_s(x, y, 1, k, NULL, NULL);
}
void quantize_row_iq2_s(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_iq2_s * restrict y = vy;
    quantize_row_iq2_s_reference(x, y, k);
}