// ggml-quants.c
#define GGML_COMMON_IMPL_C
#include "ggml-common.h"
#include "ggml-quants.h"
#include "ggml-impl.h"
#define GGML_COMMON_IMPL_C
#include "ggml-common.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#ifdef __ARM_NEON
// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>
#else
#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define UNUSED GGML_UNUSED

// some compilers don't provide _mm256_set_m128i, e.g. gcc 7
#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}
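
// Scalar reference for mul_sum_i8_pairs (a hedged sketch added for illustration,
// not part of the original file): each of the four int32 lanes returned above is
// the dot product of one group of four consecutive signed bytes. The intermediate
// int16 saturation of _mm_maddubs_epi16 is ignored here.
static inline void mul_sum_i8_pairs_scalar_ref(const int8_t * x, const int8_t * y, int32_t * out) {
    for (int k = 0; k < 4; ++k) {
        int32_t acc = 0;
        for (int j = 0; j < 4; ++j) {
            acc += (int32_t) x[4*k + j] * (int32_t) y[4*k + j];
        }
        out[k] = acc;
    }
}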
#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}
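
// Scalar reference for hsum_float_8 (hedged sketch, for illustration only): the
// extract/add/shuffle sequence above is a lane-order-independent way of computing
// the plain sum of the eight float lanes, up to floating-point rounding.
static inline float hsum_float_8_scalar_ref(const float * v) {
    float sum = 0.0f;
    for (int i = 0; i < 8; ++i) {
        sum += v[i];
    }
    return sum;
}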
// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}
#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}
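
// Scalar reference for bytes_from_bits_32 (hedged sketch, for illustration only):
// output byte i is 0xFF when bit i of the packed 32-bit word is set, 0x00 otherwise.
// The OR with 0x7fbfdfeff7fbfdfe above sets every bit except the one being tested,
// so the compare-with-all-ones that follows isolates that single bit per byte lane.
static inline void bytes_from_bits_32_scalar_ref(const uint8_t * x, uint8_t * out) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    for (int i = 0; i < 32; ++i) {
        out[i] = ((x32 >> i) & 1) ? 0xFF : 0x00;
    }
}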
// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}
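
// Scalar reference for bytes_from_nibbles_32 (hedged sketch, for illustration only):
// the 16 low nibbles of the input end up in output bytes 0..15 and the 16 high
// nibbles in bytes 16..31, matching the 128-bit lane split produced above.
static inline void bytes_from_nibbles_32_scalar_ref(const uint8_t * rsi, uint8_t * out) {
    for (int i = 0; i < 16; ++i) {
        out[i]      = rsi[i] & 0x0F;
        out[i + 16] = rsi[i] >> 4;
    }
}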
// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}
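
// Per-element view of the sign trick used above (hedged sketch, for illustration
// only): _mm256_maddubs_epi16 needs an unsigned first operand, so x*y is rewritten
// as |x| * (y carrying the sign of x). When x == 0 both sides are 0, which is why
// _mm256_sign_epi8 zeroing those lanes of y is harmless.
static inline int32_t mul_i8_sign_trick_ref(int8_t x, int8_t y) {
    const int32_t ax = x < 0 ? -(int32_t) x : (int32_t) x;                    // |x|, fits an unsigned byte
    const int32_t sy = x < 0 ? -(int32_t) y : (x == 0 ? 0 : (int32_t) y);     // y with the sign of x
    return ax * sy;                                                           // equals (int32_t) x * (int32_t) y
}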
static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4);   // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);               // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                         // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
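
// Scalar reference for packNibbles (hedged sketch, for illustration only): each
// 16-bit lane holds one 4-bit value in its low byte and another in its high byte;
// the result packs them into a single byte per lane as (hi << 4) | lo.
static inline void packNibbles_scalar_ref(const uint16_t * lanes, uint8_t * out, int n_lanes) {
    for (int i = 0; i < n_lanes; ++i) {
        const uint8_t lo = (uint8_t) ( lanes[i]       & 0x0F);
        const uint8_t hi = (uint8_t) ((lanes[i] >> 8) & 0x0F);
        out[i] = (uint8_t) ((hi << 4) | lo);
    }
}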
#elif defined(__AVX__)

// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
    const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
    __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
    __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
    const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytesl = _mm_or_si128(bytesl, bit_mask);
    bytesh = _mm_or_si128(bytesh, bit_mask);
    bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
    bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
    return MM256_SET_M128I(bytesh, bytesl);
}
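
// How the bit spread works (illustrative note): the shuffle first copies
// source byte k/8 into every output byte k. The constant 0x7fbfdfeff7fbfdfe
// has, in byte k (little-endian), every bit set *except* bit k%8, so after
// the OR a byte is 0xFF iff its designated bit was set in the source; the
// cmpeq against -1 then turns that into the { 0x00, 0xFF } byte mask.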

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    // Load 16 bytes from memory
    __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
    __m128i tmph = _mm_srli_epi16(tmpl, 4);
    const __m128i lowMask = _mm_set1_epi8(0xF);
    tmpl = _mm_and_si128(lowMask, tmpl);
    tmph = _mm_and_si128(lowMask, tmph);
    return MM256_SET_M128I(tmph, tmpl);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
    const __m128i ones = _mm_set1_epi16(1);
    const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
    const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
    const __m128i axl = _mm256_castsi256_si128(ax);
    const __m128i axh = _mm256_extractf128_si256(ax, 1);
    const __m128i syl = _mm256_castsi256_si128(sy);
    const __m128i syh = _mm256_extractf128_si256(sy, 1);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
    const __m128i xl = _mm256_castsi256_si128(x);
    const __m128i xh = _mm256_extractf128_si256(x, 1);
    const __m128i yl = _mm256_castsi256_si128(y);
    const __m128i yh = _mm256_extractf128_si256(y, 1);
    // Get absolute values of x vectors
    const __m128i axl = _mm_sign_epi8(xl, xl);
    const __m128i axh = _mm_sign_epi8(xh, xh);
    // Sign the values of the y vectors
    const __m128i syl = _mm_sign_epi8(yl, xl);
    const __m128i syh = _mm_sign_epi8(yh, xh);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m128i lowByte = _mm_set1_epi16( 0xFF );
    __m128i high = _mm_andnot_si128( lowByte, bytes1 );
    __m128i low  = _mm_and_si128( lowByte, bytes1 );
    high = _mm_srli_epi16( high, 4 );
    bytes1 = _mm_or_si128( low, high );
    high = _mm_andnot_si128( lowByte, bytes2 );
    low  = _mm_and_si128( lowByte, bytes2 );
    high = _mm_srli_epi16( high, 4 );
    bytes2 = _mm_or_si128( low, high );

    return _mm_packus_epi16( bytes1, bytes2);
}
#endif
#elif defined(__SSSE3__)

// horizontally add 4x4 floats
static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
    __m128 res_0 = _mm_hadd_ps(a, b);
    __m128 res_1 = _mm_hadd_ps(c, d);
    __m128 res   = _mm_hadd_ps(res_0, res_1);
    res          = _mm_hadd_ps(res, res);
    res          = _mm_hadd_ps(res, res);

    return _mm_cvtss_f32(res);
}
#endif // __AVX__ || __AVX2__ || __AVX512F__
#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)

#if defined(__ARM_NEON)
#ifdef _MSC_VER
#define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }
#else
#define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }
#endif

#if !defined(__aarch64__)

// 64-bit compatibility
// vaddvq_s16
// vpaddq_s16
// vpaddq_s32
// vaddvq_s32
// vaddvq_f32
// vmaxvq_f32
// vcvtnq_s32_f32
// vzip1_u8
// vzip2_u8

inline static int32_t vaddvq_s16(int16x8_t v) {
    return
        (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
        (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
        (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
        (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
}

inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
    int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
    int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
    return vcombine_s16(a0, b0);
}

inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
    int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
    int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
    return vcombine_s32(a0, b0);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}
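
// NOTE: this scalar fallback is not bit-exact with the AArch64 instruction:
// FCVTNS rounds ties to even, while roundf() rounds ties away from zero, so
// inputs exactly halfway between two integers (e.g. 2.5f) can quantize one
// step differently on 32-bit ARM.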

inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[0]; res[1] = b[0];
    res[2] = a[1]; res[3] = b[1];
    res[4] = a[2]; res[5] = b[2];
    res[6] = a[3]; res[7] = b[3];

    return res;
}

inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[4]; res[1] = b[4];
    res[2] = a[5]; res[3] = b[5];
    res[4] = a[6]; res[5] = b[6];
    res[6] = a[7]; res[7] = b[7];

    return res;
}

// vld1q_s16_x2
// vld1q_u8_x2
// vld1q_u8_x4
// vld1q_s8_x2
// vld1q_s8_x4
// TODO: double-check these work correctly

typedef struct ggml_int16x8x2_t {
    int16x8_t val[2];
} ggml_int16x8x2_t;

inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
    ggml_int16x8x2_t res;

    res.val[0] = vld1q_s16(ptr + 0);
    res.val[1] = vld1q_s16(ptr + 8);

    return res;
}

typedef struct ggml_uint8x16x2_t {
    uint8x16_t val[2];
} ggml_uint8x16x2_t;

inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
    ggml_uint8x16x2_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);

    return res;
}

typedef struct ggml_uint8x16x4_t {
    uint8x16_t val[4];
} ggml_uint8x16x4_t;

inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
    ggml_uint8x16x4_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);
    res.val[2] = vld1q_u8(ptr + 32);
    res.val[3] = vld1q_u8(ptr + 48);

    return res;
}

typedef struct ggml_int8x16x2_t {
    int8x16_t val[2];
} ggml_int8x16x2_t;

inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
    ggml_int8x16x2_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);

    return res;
}

typedef struct ggml_int8x16x4_t {
    int8x16_t val[4];
} ggml_int8x16x4_t;

inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
    ggml_int8x16x4_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);
    res.val[2] = vld1q_s8(ptr + 32);
    res.val[3] = vld1q_s8(ptr + 48);

    return res;
}

// NOTE: not tested
inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
    int8x16_t res;

    res[ 0] = a[b[ 0]];
    res[ 1] = a[b[ 1]];
    res[ 2] = a[b[ 2]];
    res[ 3] = a[b[ 3]];
    res[ 4] = a[b[ 4]];
    res[ 5] = a[b[ 5]];
    res[ 6] = a[b[ 6]];
    res[ 7] = a[b[ 7]];
    res[ 8] = a[b[ 8]];
    res[ 9] = a[b[ 9]];
    res[10] = a[b[10]];
    res[11] = a[b[11]];
    res[12] = a[b[12]];
    res[13] = a[b[13]];
    res[14] = a[b[14]];
    res[15] = a[b[15]];

    return res;
}

// NOTE: not tested
inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
    uint8x16_t res;

    res[ 0] = a[b[ 0]];
    res[ 1] = a[b[ 1]];
    res[ 2] = a[b[ 2]];
    res[ 3] = a[b[ 3]];
    res[ 4] = a[b[ 4]];
    res[ 5] = a[b[ 5]];
    res[ 6] = a[b[ 6]];
    res[ 7] = a[b[ 7]];
    res[ 8] = a[b[ 8]];
    res[ 9] = a[b[ 9]];
    res[10] = a[b[10]];
    res[11] = a[b[11]];
    res[12] = a[b[12]];
    res[13] = a[b[13]];
    res[14] = a[b[14]];
    res[15] = a[b[15]];

    return res;
}

#else

#define ggml_int16x8x2_t  int16x8x2_t
#define ggml_uint8x16x2_t uint8x16x2_t
#define ggml_uint8x16x4_t uint8x16x4_t
#define ggml_int8x16x2_t  int8x16x2_t
#define ggml_int8x16x4_t  int8x16x4_t

#define ggml_vld1q_s16_x2 vld1q_s16_x2
#define ggml_vld1q_u8_x2  vld1q_u8_x2
#define ggml_vld1q_u8_x4  vld1q_u8_x4
#define ggml_vld1q_s8_x2  vld1q_s8_x2
#define ggml_vld1q_s8_x4  vld1q_s8_x4
#define ggml_vqtbl1q_s8   vqtbl1q_s8
#define ggml_vqtbl1q_u8   vqtbl1q_u8

#endif

#if !defined(__ARM_FEATURE_DOTPROD)

inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));

    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
}

#else

#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)

#endif

#endif

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8 bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif
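
// How the tables expand (worked example, illustrative): each macro level
// appends one more bit of the index, so entry i of table_b2b_0 is the 64-bit
// constant whose k-th byte (little-endian) is 0x10 if bit k of i is set and
// 0x00 otherwise. For example, table_b2b_0[0x05] = 0x0000000000100010, i.e.
// bits 0 and 2 expanded to bytes and pre-shifted left by 4. table_b2b_1 is
// the complement, expanding the inverted bits (!b) << 4.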

// reference implementation for deterministic creation of model files
void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
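
// Worked example (illustrative): for a block whose extreme value is max = -2.0,
// d = -2.0 / -8 = 0.25 and id = 4.0, so x = -2.0 maps to (int8_t)(-8.0 + 8.5) = 0
// and x = +1.75 maps to (int8_t)(7.0 + 8.5) = 15. Dequantization later computes
// (q - 8)*d, recovering -2.0 and 1.75 exactly at the endpoints.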

void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_0_reference(x, y, k);
}

void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
    const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}

void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_1_reference(x, y, k);
}

void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -16;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
            const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(qh));
    }
}
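
// Bit layout (illustrative note): a q5_0 quant is 5 bits. The low 4 bits of
// element j live in the low nibble of qs[j] (high nibble for element j+16),
// and the 5th bits of all 32 elements are collected into the 32-bit qh, with
// bit j holding element j's top bit. Dequantization reassembles the value as
//   q = (qs nibble) | (((qh >> j) & 1) << 4)
// and then subtracts 16.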

void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_0_reference(x, y, k);
}

void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
    const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}

void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_1_reference(x, y, k);
}

// reference implementation for deterministic creation of model files
void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_0; j++) {
            const float v = x[i*QK8_0 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < QK8_0; ++j) {
            const float x0 = x[i*QK8_0 + j]*id;

            y[i].qs[j] = roundf(x0);
        }
    }
}
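
// Worked example (illustrative): with amax = 12.7 the scale is d = 12.7/127 = 0.1;
// an input of 3.14 is stored as roundf(3.14/0.1) = 31 and dequantizes to 3.1.
// Within a block the rounding error is bounded by d/2, up to the extra fp16
// rounding of d itself.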

void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
        }
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
        }
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = GGML_FP32_TO_FP16(d);
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 );  // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 );  // 16, 17, 18, 19,  24, 25, 26, 27,  20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 );  // 0, 1, 2, 3,  8, 9, 10, 11,  16, 17, 18, 19,  24, 25, 26, 27,  4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // AVX lacks the required integer instructions,
        // so split the registers in half and use the SSE equivalents
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#elif defined(__riscv_v_intrinsic)
    size_t vl = __riscv_vsetvl_e32m4(QK8_0);

    for (int i = 0; i < nb; i++) {
        // load elements
        vfloat32m4_t v_x   = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);

        vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
        vfloat32m1_t tmp   = __riscv_vfmv_v_f_f32m1(0.0f, vl);
        vfloat32m1_t vmax  = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
        float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);

        // convert to integer
        vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
        vint8m1_t  vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);

        // store result
        __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
    }
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_0_reference(x, y, k);
#endif
}

// reference implementation for deterministic creation of model files
void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
    assert(QK8_1 == 32);
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_1; j++) {
            const float v = x[i*QK8_1 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        int sum = 0;

        for (int j = 0; j < QK8_1/2; ++j) {
            const float v0 = x[i*QK8_1           + j]*id;
            const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;

            y[i].qs[          j] = roundf(v0);
            y[i].qs[QK8_1/2 + j] = roundf(v1);

            sum += y[i].qs[          j];
            sum += y[i].qs[QK8_1/2 + j];
        }

        y[i].s = GGML_FP32_TO_FP16(sum*d);
    }
}
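
// Why q8_1 also stores s = d*sum (illustrative note): when a q8_1 row is
// dotted with a q4_1/q5_1 row, each term is (d4*q4 + m)*(d8*q8), so the
// constant offset m contributes m * d8 * sum(q8) = m * s in total. Having s
// precomputed lets the dot-product kernels fold in the min with a single
// multiply instead of a second pass over the quants.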

void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        int32x4_t accv = vdupq_n_s32(0);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);

            accv = vaddq_s32(accv, vi);
        }

        y[i].s = GGML_FP32_TO_FP16(d * vaddvq_s32(accv));
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        v128_t accv = wasm_i32x4_splat(0);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);

            accv = wasm_i32x4_add(accv, vi);
        }

        y[i].s = GGML_FP32_TO_FP16(
                d * (wasm_i32x4_extract_lane(accv, 0) +
                     wasm_i32x4_extract_lane(accv, 1) +
                     wasm_i32x4_extract_lane(accv, 2) +
                     wasm_i32x4_extract_lane(accv, 3)));
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = GGML_FP32_TO_FP16(d);
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Compute the sum of the quants and set y[i].s
        y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3))));

        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 );  // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 );  // 16, 17, 18, 19,  24, 25, 26, 27,  20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 );  // 0, 1, 2, 3,  8, 9, 10, 11,  16, 17, 18, 19,  24, 25, 26, 27,  4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // AVX lacks the required integer instructions,
        // so split the registers in half and use the SSE equivalents
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Compute the sum of the quants and set y[i].s
        const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
        const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
        y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1)));

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#elif defined(__riscv_v_intrinsic)
    size_t vl = __riscv_vsetvl_e32m4(QK8_1);

    for (int i = 0; i < nb; i++) {
        // load elements
        vfloat32m4_t v_x   = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);

        vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
        vfloat32m1_t tmp   = __riscv_vfmv_v_f_f32m1(0.0, vl);
        vfloat32m1_t vmax  = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
        float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);

        // convert to integer
        vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
        vint8m1_t  vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);

        // store result
        __riscv_vse8_v_i8m1(y[i].qs , vs, vl);

        // compute sum for y[i].s
        vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
        vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);

        // set y[i].s
        int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
        y[i].s = GGML_FP32_TO_FP16(sum*d);
    }
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_1_reference(x, y, k);
#endif
}

void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK8_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk; ++j) {
            y[i*qk + j] = x[i].qs[j]*d;
        }
    }
}

//
// 2-6 bit quantization in super-blocks
//

//
// ===================== Helper functions
//

static inline int nearest_int(float fval) {
    assert(fval <= 4194303.f);
    float val = fval + 12582912.f;
    int i; memcpy(&i, &val, sizeof(int));
    return (i & 0x007fffff) - 0x00400000;
}
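
// How the magic constant works (illustrative note): 12582912.0f = 1.5 * 2^23.
// For |fval| <= 2^22 the sum lands in [2^23, 2^24), where float spacing is
// exactly 1.0, so the addition itself rounds fval to an integer (in the FPU's
// current mode, round-to-nearest-even by default), and the low 23 mantissa
// bits then hold round(fval) + 0x00400000. Masking the mantissa and
// subtracting 0x00400000 recovers the rounded integer without a slow
// float-to-int conversion. Example: 2.7f + 12582912.0f rounds to 12582915.0f,
// whose mantissa bits are 0x00400003, giving 3.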

static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
        const float * restrict qw) {
    float max = 0;
    float amax = 0;
    for (int i = 0; i < n; ++i) {
        float ax = fabsf(x[i]);
        if (ax > amax) { amax = ax; max = x[i]; }
    }
    if (amax < 1e-30f) { // all zero
        for (int i = 0; i < n; ++i) {
            L[i] = 0;
        }
        return 0.f;
    }
    float iscale = -nmax / max;
    if (rmse_type == 0) {
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
        }
        return 1/iscale;
    }
    bool return_early = false;
    if (rmse_type < 0) {
        rmse_type = -rmse_type;
        return_early = true;
    }
    float sumlx = 0;
    float suml2 = 0;
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 0; i < n; ++i) {
#else
    for (int i = 0; i < n; ++i) {
#endif
        int l = nearest_int(iscale * x[i]);
        l = MAX(-nmax, MIN(nmax-1, l));
        L[i] = l + nmax;
        float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
        sumlx += w*x[i]*l;
        suml2 += w*l*l;
    }
    float scale = sumlx/suml2;
    if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
    float best = scale * sumlx;
    for (int is = -9; is <= 9; ++is) {
        if (is == 0) {
            continue;
        }
        iscale = -(nmax + 0.1f*is) / max;
        sumlx = suml2 = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            l = MAX(-nmax, MIN(nmax-1, l));
            float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
            sumlx += w*x[i]*l;
            suml2 += w*l*l;
        }
        if (suml2 > 0 && sumlx*sumlx > best*suml2) {
            for (int i = 0; i < n; ++i) {
                int l = nearest_int(iscale * x[i]);
                L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
            }
            scale = sumlx/suml2; best = scale*sumlx;
        }
    }
    return scale;
}
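
// Why scale = sumlx/suml2 (illustrative note): for fixed integer levels l_i
// and weights w_i, the weighted squared error E(s) = sum_i w_i*(x_i - s*l_i)^2
// is minimized at dE/ds = 0, i.e. s = (sum_i w_i*x_i*l_i) / (sum_i w_i*l_i^2),
// and the attained error is sum_i w_i*x_i^2 - sumlx^2/suml2. Maximizing
// sumlx^2/suml2 therefore minimizes the error, which is why the candidate
// search above compares sumlx*sumlx > best*suml2 instead of recomputing the
// full error for each trial scale.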

static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
    float max = 0;
    float amax = 0;
    for (int i = 0; i < n; ++i) {
        float ax = fabsf(x[i]);
        if (ax > amax) { amax = ax; max = x[i]; }
    }
    if (!amax) { // all zero
        for (int i = 0; i < n; ++i) { L[i] = 0; }
        return 0.f;
    }
    float iscale = -nmax / max;
    if (do_rmse) {
        float sumlx = 0;
        float suml2 = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            l = MAX(-nmax, MIN(nmax-1, l));
            L[i] = l;
            float w = x[i]*x[i];
            sumlx += w*x[i]*l;
            suml2 += w*l*l;
        }
        for (int itry = 0; itry < 5; ++itry) {
            int n_changed = 0;
            for (int i = 0; i < n; ++i) {
                float w = x[i]*x[i];
                float slx = sumlx - w*x[i]*L[i];
                if (slx > 0) {
                    float sl2 = suml2 - w*L[i]*L[i];
                    int new_l = nearest_int(x[i] * sl2 / slx);
                    new_l = MAX(-nmax, MIN(nmax-1, new_l));
                    if (new_l != L[i]) {
                        slx += w*x[i]*new_l;
                        sl2 += w*new_l*new_l;
                        if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
                            L[i] = new_l; sumlx = slx; suml2 = sl2;
                            ++n_changed;
                        }
                    }
                }
            }
            if (!n_changed) {
                break;
            }
        }
        for (int i = 0; i < n; ++i) {
            L[i] += nmax;
        }
        return sumlx / suml2;
    }
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale * x[i]);
        l = MAX(-nmax, MIN(nmax-1, l));
        L[i] = l + nmax;
    }
    return 1/iscale;
}

static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
        int ntry, float alpha) {
    float min = x[0];
    float max = x[0];
    for (int i = 1; i < n; ++i) {
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
    }
    if (max == min) {
        for (int i = 0; i < n; ++i) L[i] = 0;
        *the_min = 0;
        return 0.f;
    }
    if (min > 0) min = 0;
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    for (int itry = 0; itry < ntry; ++itry) {
        float sumlx = 0; int suml2 = 0;
        bool did_change = false;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            if (l != L[i]) {
                L[i] = l;
                did_change = true;
            }
            sumlx += (x[i] - min)*l;
            suml2 += l*l;
        }
        scale = sumlx/suml2;
        float sum = 0;
        for (int i = 0; i < n; ++i) {
            sum += x[i] - scale*L[i];
        }
        min = alpha*min + (1 - alpha)*sum/n;
        if (min > 0) min = 0;
        iscale = 1/scale;
        if (!did_change) break;
    }
    *the_min = -min;
    return scale;
}

static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
        uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
        float rmin, float rdelta, int nstep, bool use_mad) {
    float min = x[0];
    float max = x[0];
    float sum_w = weights[0];
    float sum_x = sum_w * x[0];
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
        float w = weights[i];
        sum_w += w;
        sum_x += w * x[i];
    }
    if (min > 0) min = 0;
    if (max == min) {
        for (int i = 0; i < n; ++i) L[i] = 0;
        *the_min = -min;
        return 0.f;
    }
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    float best_mad = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale*(x[i] - min));
        L[i] = MAX(0, MIN(nmax, l));
        float diff = scale * L[i] + min - x[i];
        diff = use_mad ? fabsf(diff) : diff * diff;
        float w = weights[i];
        best_mad += w * diff;
    }
    if (nstep < 1) {
        *the_min = -min;
        return scale;
    }
    for (int is = 0; is <= nstep; ++is) {
        iscale = (rmin + rdelta*is + nmax)/(max - min);
        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            Laux[i] = l;
            float w = weights[i];
            sum_l  += w*l;
            sum_l2 += w*l*l;
            sum_xl += w*l*x[i];
        }
        float D = sum_w * sum_l2 - sum_l * sum_l;
        if (D > 0) {
            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
            if (this_min > 0) {
                this_min = 0;
                this_scale = sum_xl / sum_l2;
            }
            float mad = 0;
            for (int i = 0; i < n; ++i) {
                float diff = this_scale * Laux[i] + this_min - x[i];
                diff = use_mad ? fabsf(diff) : diff * diff;
                float w = weights[i];
                mad += w * diff;
            }
            if (mad < best_mad) {
                for (int i = 0; i < n; ++i) {
                    L[i] = Laux[i];
                }
                best_mad = mad;
                scale = this_scale;
                min = this_min;
            }
        }
    }
    *the_min = -min;
    return scale;
}
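
// Where this_scale/this_min come from (illustrative note): for fixed levels
// l_i the model is x_i ~ s*l_i + m, and minimizing sum_i w_i*(s*l_i + m - x_i)^2
// over (s, m) gives the 2x2 normal equations
//   [ sum_l2  sum_l ] [ s ]   [ sum_xl ]
//   [ sum_l   sum_w ] [ m ] = [ sum_x  ]
// whose Cramer's-rule solution, with determinant D = sum_w*sum_l2 - sum_l^2,
// is exactly the pair computed above. m is clamped to <= 0 so the stored
// per-block "min" stays non-negative after the *the_min = -min negation.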

#if QK_K == 256
static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
    if (j < 4) {
        *d = q[j] & 63; *m = q[j + 4] & 63;
    } else {
        *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        *m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
    }
}
#endif
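
// Packing scheme (illustrative note): 8 scales and 8 mins, 6 bits each, fit
// in the 12-byte q[]. For j < 4 the low 6 bits of q[j] hold scale j and the
// low 6 bits of q[j+4] hold min j; their two high bits supply the upper
// halves of entries 4..7, whose lower 4 bits sit in the nibbles of q[8..11].
// For example, for j = 4:
//   d = (q[8] & 0xF) | ((q[0] >> 6) << 4);  m = (q[8] >> 4) | ((q[4] >> 6) << 4);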

//========================= 2-bit (de)-quantization

void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint8_t L[QK_K];
    uint8_t Laux[16];
    float   weights[16];
    float mins[QK_K/16];
    float scales[QK_K/16];

    const float q4scale = 15.f;

    for (int i = 0; i < nb; i++) {
        float max_scale = 0; // as we are subtracting the min, scales are always positive
        float max_min = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
            scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
            float scale = scales[j];
            if (scale > max_scale) {
                max_scale = scale;
            }
            float min = mins[j];
            if (min > max_min) {
                max_min = min;
            }
        }

        if (max_scale > 0) {
            float iscale = q4scale/max_scale;
            for (int j = 0; j < QK_K/16; ++j) {
                int l = nearest_int(iscale*scales[j]);
                y[i].scales[j] = l;
            }
            y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
        } else {
            for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
            y[i].d = GGML_FP32_TO_FP16(0.f);
        }
        if (max_min > 0) {
            float iscale = q4scale/max_min;
            for (int j = 0; j < QK_K/16; ++j) {
                int l = nearest_int(iscale*mins[j]);
                y[i].scales[j] |= (l << 4);
            }
            y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
        } else {
            y[i].dmin = GGML_FP32_TO_FP16(0.f);
        }
        for (int j = 0; j < QK_K/16; ++j) {
            const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int((x[16*j + ii] + dm)/d);
                l = MAX(0, MIN(3, l));
                L[16*j + ii] = l;
            }
        }

#if QK_K == 256
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }
#else
        for (int l = 0; l < 16; ++l) {
            y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
        }
#endif

        x += QK_K;
    }
}

void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float min = GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * q = x[i].qs;

#if QK_K == 256
        int is = 0;
        float dl, ml;
        for (int n = 0; n < QK_K; n += 128) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {

                uint8_t sc = x[i].scales[is++];
                dl = d * (sc & 0xF); ml = min * (sc >> 4);
                for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;

                sc = x[i].scales[is++];
                dl = d * (sc & 0xF); ml = min * (sc >> 4);
                for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;

                shift += 2;
            }
            q += 32;
        }
#else
        float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
        float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
        float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
        float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
        for (int l = 0; l < 16; ++l) {
            y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
            y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
            y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
            y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
        }
        y += QK_K;
#endif
    }
}

void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
    quantize_row_q2_K_reference(x, vy, k);
}

static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
        uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
        float rmin, float rdelta, int nstep, bool use_mad) {
    float min = x[0];
    float max = x[0];
    float sum_w = weights ? weights[0] : x[0]*x[0];
    float sum_x = sum_w * x[0];
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
        float w = weights ? weights[i] : x[i]*x[i];
        sum_w += w;
        sum_x += w * x[i];
    }
    if (min > 0) {
        min = 0;
    }
    if (max <= min) {
        memset(L, 0, n);
        *the_min = -min;
        return 0.f;
    }
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    float best_mad = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale*(x[i] - min));
        L[i] = MAX(0, MIN(nmax, l));
        float diff = scale * L[i] + min - x[i];
        diff = use_mad ? fabsf(diff) : diff*diff;
        float w = weights ? weights[i] : x[i]*x[i];
        best_mad += w * diff;
    }
    if (nstep < 1) {
        *the_min = -min;
        return scale;
    }
    for (int is = 0; is <= nstep; ++is) {
        iscale = (rmin + rdelta*is + nmax)/(max - min);
        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            Laux[i] = l;
            float w = weights ? weights[i] : x[i]*x[i];
            sum_l  += w*l;
            sum_l2 += w*l*l;
            sum_xl += w*l*x[i];
        }
        float D = sum_w * sum_l2 - sum_l * sum_l;
        if (D > 0) {
            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
            if (this_min > 0) {
                this_min = 0;
                this_scale = sum_xl / sum_l2;
            }
            float mad = 0;
            for (int i = 0; i < n; ++i) {
                float diff = this_scale * Laux[i] + this_min - x[i];
                diff = use_mad ? fabsf(diff) : diff*diff;
                float w = weights ? weights[i] : x[i]*x[i];
                mad += w * diff;
            }
            if (mad < best_mad) {
                for (int i = 0; i < n; ++i) {
                    L[i] = Laux[i];
                }
                best_mad = mad;
                scale = this_scale;
                min = this_min;
            }
        }
    }
    *the_min = -min;
    return scale;
}

static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
    float max = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    if (!max) { // all zero
        for (int i = 0; i < n; ++i) { L[i] = 0; }
        return 0.f;
    }
    float iscale = nmax / max;
    for (int i = 0; i < n; ++i) {
        L[i] = nearest_int(iscale * x[i]);
    }
    float scale = 1/iscale;
    float best_mse = 0;
    for (int i = 0; i < n; ++i) {
        float diff = x[i] - scale*L[i];
        float w = quant_weights[i];
        best_mse += w*diff*diff;
    }
    for (int is = -4; is <= 4; ++is) {
        if (is == 0) continue;
        float iscale_is = (0.1f*is + nmax)/max;
        float scale_is = 1/iscale_is;
        float mse = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale_is*x[i]);
            l = MIN(nmax, l);
            float diff = x[i] - scale_is*l;
            float w = quant_weights[i];
            mse += w*diff*diff;
        }
        if (mse < best_mse) {
            best_mse = mse;
            iscale = iscale_is;
        }
    }
    float sumlx = 0;
    float suml2 = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale * x[i]);
        l = MIN(nmax, l);
        L[i] = l;
        float w = quant_weights[i];
        sumlx += w*x[i]*l;
        suml2 += w*l*l;
    }
    for (int itry = 0; itry < 5; ++itry) {
        int n_changed = 0;
        for (int i = 0; i < n; ++i) {
            float w = quant_weights[i];
            float slx = sumlx - w*x[i]*L[i];
            float sl2 = suml2 - w*L[i]*L[i];
            if (slx > 0 && sl2 > 0) {
                int new_l = nearest_int(x[i] * sl2 / slx);
                new_l = MIN(nmax, new_l);
                if (new_l != L[i]) {
                    slx += w*x[i]*new_l;
                    sl2 += w*new_l*new_l;
                    if (slx*slx*suml2 > sumlx*sumlx*sl2) {
                        L[i] = new_l; sumlx = slx; suml2 = sl2;
                        ++n_changed;
                    }
                }
            }
        }
        if (!n_changed) {
            break;
        }
    }
    return sumlx/suml2;
}
  1560. static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
  1561. GGML_ASSERT(quant_weights);
  1562. assert(k % QK_K == 0);
  1563. const int nb = k / QK_K;
  1564. const bool requantize = true;
  1565. uint8_t L[QK_K];
  1566. uint8_t Laux[16];
  1567. float mins[QK_K/16];
  1568. float scales[QK_K/16];
  1569. float sw[QK_K/16];
  1570. float weight[16];
  1571. uint8_t Ls[QK_K/16], Lm[QK_K/16];
  1572. for (int i = 0; i < nb; i++) {
  1573. memset(sw, 0, QK_K/16*sizeof(float));
  1574. float sumx2 = 0;
  1575. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1576. float sigma2 = sumx2/QK_K;
  1577. for (int j = 0; j < QK_K/16; ++j) {
  1578. const float * restrict qw = quant_weights + QK_K * i + 16*j;
  1579. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
  1580. for (int l = 0; l < QK_K/16; ++l) sw[j] += weight[l];
  1581. scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1582. }
  1583. float dm, mm;
  1584. #if QK_K == 64
  1585. float max_scale = 0, max_min = 0;
  1586. for (int j = 0; j < QK_K/16; ++j) {
  1587. max_scale = MAX(max_scale, scales[j]);
  1588. max_min = MAX(max_min, mins[j]);
  1589. }
  1590. dm = max_scale/15;
  1591. mm = max_min/15;
  1592. if (max_scale) {
  1593. float id = 1/dm;
  1594. for (int j = 0; j < QK_K/16; ++j) {
  1595. int l = nearest_int(id*scales[j]);
  1596. Ls[j] = MAX(0, MIN(15, l));
  1597. }
  1598. } else {
  1599. memset(Ls, 0, QK_K/16);
  1600. }
  1601. if (max_min) {
  1602. float id = 1/mm;
  1603. for (int j = 0; j < QK_K/16; ++j) {
  1604. int l = nearest_int(id*mins[j]);
  1605. Lm[j] = MAX(0, MIN(15, l));
  1606. }
  1607. } else {
  1608. memset(Lm, 0, QK_K/16);
  1609. }
  1610. #else
  1611. dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
  1612. mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw);
  1613. #endif
  1614. y[i].d = GGML_FP32_TO_FP16(dm);
  1615. y[i].dmin = GGML_FP32_TO_FP16(mm);
  1616. dm = GGML_FP16_TO_FP32(y[i].d);
  1617. mm = GGML_FP16_TO_FP32(y[i].dmin);
  1618. for (int j = 0; j < QK_K/16; ++j) {
  1619. y[i].scales[j] = Ls[j] | (Lm[j] << 4);
  1620. }
  1621. if (requantize) {
  1622. for (int j = 0; j < QK_K/16; ++j) {
  1623. const float d = dm * (y[i].scales[j] & 0xF);
  1624. if (!d) continue;
  1625. const float m = mm * (y[i].scales[j] >> 4);
  1626. for (int ii = 0; ii < 16; ++ii) {
  1627. int l = nearest_int((x[16*j + ii] + m)/d);
  1628. l = MAX(0, MIN(3, l));
  1629. L[16*j + ii] = l;
  1630. }
  1631. }
  1632. }
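// Pack four 2-bit values into each byte of qs (elements j, j+32, j+64, j+96 of a 128-value half
// for QK_K == 256, or j, j+16, j+32, j+48 for QK_K == 64).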
  1633. #if QK_K == 256
  1634. for (int j = 0; j < QK_K; j += 128) {
  1635. for (int l = 0; l < 32; ++l) {
  1636. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1637. }
  1638. }
  1639. #else
  1640. for (int l = 0; l < 16; ++l) {
  1641. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1642. }
  1643. #endif
  1644. x += QK_K;
  1645. }
  1646. }
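// Row-wise Q2_K driver: quant_weights, when given, holds n_per_row importance values (one per
// column, shared by every row); otherwise the reference quantizer is used. Returns the total
// number of bytes written, i.e. nrow * row_size.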
  1647. size_t quantize_q2_K(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  1648. size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
  1649. if (!quant_weights) {
  1650. quantize_row_q2_K_reference(src, dst, nrow*n_per_row);
  1651. }
  1652. else {
  1653. char * qrow = (char *)dst;
  1654. for (int row = 0; row < nrow; ++row) {
  1655. quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
  1656. src += n_per_row;
  1657. qrow += row_size;
  1658. }
  1659. }
  1660. return nrow * row_size;
  1661. }
  1662. //========================= 3-bit (de)-quantization
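// block_q3_K (QK_K == 256): hmask[QK_K/8] holds the high bit of each 3-bit value, qs[QK_K/4] the
// low 2 bits, scales[12] packs 16 6-bit sub-block scales, and d is the fp16 super-block scale.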
  1663. void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
  1664. assert(k % QK_K == 0);
  1665. const int nb = k / QK_K;
  1666. int8_t L[QK_K];
  1667. float scales[QK_K / 16];
  1668. for (int i = 0; i < nb; i++) {
  1669. float max_scale = 0;
  1670. float amax = 0;
  1671. for (int j = 0; j < QK_K/16; ++j) {
  1672. scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
  1673. float scale = fabsf(scales[j]);
  1674. if (scale > amax) {
  1675. amax = scale; max_scale = scales[j];
  1676. }
  1677. }
  1678. #if QK_K == 256
  1679. memset(y[i].scales, 0, 12);
  1680. if (max_scale) {
  1681. float iscale = -32.f/max_scale;
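// Pack the 16 6-bit scales into 12 bytes: the low 4 bits of scale j go into the low nibble of
// scales[j] (j < 8) or the high nibble of scales[j-8] (j >= 8); the top 2 bits go into
// scales[8 + j%4] at bit position 2*(j/4).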
  1682. for (int j = 0; j < QK_K/16; ++j) {
  1683. int8_t l = nearest_int(iscale*scales[j]);
  1684. l = MAX(-32, MIN(31, l)) + 32;
  1685. if (j < 8) {
  1686. y[i].scales[j] = l & 0xF;
  1687. } else {
  1688. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1689. }
  1690. l >>= 4;
  1691. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1692. }
  1693. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1694. } else {
  1695. y[i].d = GGML_FP32_TO_FP16(0.f);
  1696. }
  1697. int8_t sc;
  1698. for (int j = 0; j < QK_K/16; ++j) {
  1699. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1700. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1701. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1702. if (!d) {
  1703. continue;
  1704. }
  1705. for (int ii = 0; ii < 16; ++ii) {
  1706. int l = nearest_int(x[16*j + ii]/d);
  1707. l = MAX(-4, MIN(3, l));
  1708. L[16*j + ii] = l + 4;
  1709. }
  1710. }
  1711. #else
  1712. if (max_scale) {
  1713. float iscale = -8.f/max_scale;
  1714. for (int j = 0; j < QK_K/16; j+=2) {
  1715. int l1 = nearest_int(iscale*scales[j]);
  1716. l1 = 8 + MAX(-8, MIN(7, l1));
  1717. int l2 = nearest_int(iscale*scales[j+1]);
  1718. l2 = 8 + MAX(-8, MIN(7, l2));
  1719. y[i].scales[j/2] = l1 | (l2 << 4);
  1720. }
  1721. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1722. } else {
  1723. for (int j = 0; j < QK_K/16; j+=2) {
  1724. y[i].scales[j/2] = 0;
  1725. }
  1726. y[i].d = GGML_FP32_TO_FP16(0.f);
  1727. }
  1728. for (int j = 0; j < QK_K/16; ++j) {
  1729. int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
  1730. float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
  1731. if (!d) {
  1732. continue;
  1733. }
  1734. for (int ii = 0; ii < 16; ++ii) {
  1735. int l = nearest_int(x[16*j + ii]/d);
  1736. l = MAX(-4, MIN(3, l));
  1737. L[16*j + ii] = l + 4;
  1738. }
  1739. }
  1740. #endif
  1741. memset(y[i].hmask, 0, QK_K/8);
// Put the high bit of the first QK_K/8 quants into bit 0 of hmask, of the next QK_K/8 into bit 1, etc.
  1743. int m = 0;
  1744. uint8_t hm = 1;
  1745. for (int j = 0; j < QK_K; ++j) {
  1746. if (L[j] > 3) {
  1747. y[i].hmask[m] |= hm;
  1748. L[j] -= 4;
  1749. }
  1750. if (++m == QK_K/8) {
  1751. m = 0; hm <<= 1;
  1752. }
  1753. }
  1754. #if QK_K == 256
  1755. for (int j = 0; j < QK_K; j += 128) {
  1756. for (int l = 0; l < 32; ++l) {
  1757. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1758. }
  1759. }
  1760. #else
  1761. for (int l = 0; l < 16; ++l) {
  1762. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1763. }
  1764. #endif
  1765. x += QK_K;
  1766. }
  1767. }
  1768. #if QK_K == 256
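// The 16 6-bit scales are reconstructed from the 12 packed bytes with the kmask1/kmask2 shuffle
// below; each resulting byte is a scale biased by +32.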
  1769. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
  1770. assert(k % QK_K == 0);
  1771. const int nb = k / QK_K;
  1772. const uint32_t kmask1 = 0x03030303;
  1773. const uint32_t kmask2 = 0x0f0f0f0f;
  1774. uint32_t aux[4];
  1775. const int8_t * scales = (const int8_t*)aux;
  1776. for (int i = 0; i < nb; i++) {
  1777. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1778. const uint8_t * restrict q = x[i].qs;
  1779. const uint8_t * restrict hm = x[i].hmask;
  1780. uint8_t m = 1;
  1781. memcpy(aux, x[i].scales, 12);
  1782. uint32_t tmp = aux[2];
  1783. aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  1784. aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  1785. aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  1786. aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  1787. int is = 0;
  1788. float dl;
  1789. for (int n = 0; n < QK_K; n += 128) {
  1790. int shift = 0;
  1791. for (int j = 0; j < 4; ++j) {
  1792. dl = d_all * (scales[is++] - 32);
  1793. for (int l = 0; l < 16; ++l) {
  1794. *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
  1795. }
  1796. dl = d_all * (scales[is++] - 32);
  1797. for (int l = 0; l < 16; ++l) {
  1798. *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
  1799. }
  1800. shift += 2;
  1801. m <<= 1;
  1802. }
  1803. q += 32;
  1804. }
  1805. }
  1806. }
  1807. #else
  1808. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
  1809. assert(k % QK_K == 0);
  1810. assert(QK_K == 64);
  1811. const int nb = k / QK_K;
  1812. for (int i = 0; i < nb; i++) {
  1813. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1814. const uint8_t * restrict q = x[i].qs;
  1815. const uint8_t * restrict hm = x[i].hmask;
  1816. const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
  1817. const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
  1818. const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
  1819. const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
  1820. for (int l=0; l<8; ++l) {
  1821. uint8_t h = hm[l];
  1822. y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
  1823. y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
  1824. y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
  1825. y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
  1826. y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
  1827. y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
  1828. y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
  1829. y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
  1830. }
  1831. y += QK_K;
  1832. }
  1833. }
  1834. #endif
  1835. void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
  1836. quantize_row_q3_K_reference(x, vy, k);
  1837. }
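// Importance-weighted Q3_K quantization (QK_K == 256 only; otherwise it falls back to the
// reference path). Per-16-value scales come from make_qx_quants with weights qw[l]*sqrt(sigma2 + x[l]^2);
// the 16 scales are then themselves quantized to 6 bits under a single fp16 super-block scale.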
  1838. static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int n_per_row, const float * restrict quant_weights) {
  1839. #if QK_K != 256
  1840. (void)quant_weights;
  1841. quantize_row_q3_K_reference(x, y, n_per_row);
  1842. #else
  1843. assert(n_per_row % QK_K == 0);
  1844. const int nb = n_per_row / QK_K;
  1845. int8_t L[QK_K];
  1846. float scales[QK_K / 16];
  1847. float weight[16];
  1848. float sw[QK_K / 16];
  1849. int8_t Ls[QK_K / 16];
  1850. for (int i = 0; i < nb; i++) {
  1851. float sumx2 = 0;
  1852. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1853. float sigma2 = 2*sumx2/QK_K;
  1854. for (int j = 0; j < QK_K/16; ++j) {
  1855. if (quant_weights) {
const float * qw = quant_weights + QK_K * i + 16*j;
  1857. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
  1858. } else {
  1859. for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
  1860. }
  1861. float sumw = 0;
  1862. for (int l = 0; l < 16; ++l) sumw += weight[l];
  1863. sw[j] = sumw;
  1864. scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
  1865. }
  1866. memset(y[i].scales, 0, 12);
  1867. float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
  1868. for (int j = 0; j < QK_K/16; ++j) {
  1869. int l = Ls[j];
  1870. if (j < 8) {
  1871. y[i].scales[j] = l & 0xF;
  1872. } else {
  1873. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1874. }
  1875. l >>= 4;
  1876. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1877. }
  1878. y[i].d = GGML_FP32_TO_FP16(d_block);
  1879. int8_t sc;
  1880. for (int j = 0; j < QK_K/16; ++j) {
  1881. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1882. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1883. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1884. if (!d) {
  1885. continue;
  1886. }
  1887. for (int ii = 0; ii < 16; ++ii) {
  1888. int l = nearest_int(x[16*j + ii]/d);
  1889. l = MAX(-4, MIN(3, l));
  1890. L[16*j + ii] = l + 4;
  1891. }
  1892. }
  1893. memset(y[i].hmask, 0, QK_K/8);
// Put the high bit of the first QK_K/8 quants into bit 0 of hmask, of the next QK_K/8 into bit 1, etc.
  1895. int m = 0;
  1896. uint8_t hm = 1;
  1897. for (int j = 0; j < QK_K; ++j) {
  1898. if (L[j] > 3) {
  1899. y[i].hmask[m] |= hm;
  1900. L[j] -= 4;
  1901. }
  1902. if (++m == QK_K/8) {
  1903. m = 0; hm <<= 1;
  1904. }
  1905. }
  1906. for (int j = 0; j < QK_K; j += 128) {
  1907. for (int l = 0; l < 32; ++l) {
  1908. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1909. }
  1910. }
  1911. x += QK_K;
  1912. }
  1913. #endif
  1914. }
  1915. size_t quantize_q3_K(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  1916. size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
  1917. if (!quant_weights) {
  1918. quantize_row_q3_K_reference(src, dst, nrow*n_per_row);
  1919. }
  1920. else {
  1921. char * qrow = (char *)dst;
  1922. for (int row = 0; row < nrow; ++row) {
  1923. quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
  1924. src += n_per_row;
  1925. qrow += row_size;
  1926. }
  1927. }
  1928. return nrow * row_size;
  1929. }
  1930. // ====================== 4-bit (de)-quantization
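// block_q4_K (QK_K == 256): fp16 super-block scale d and min dmin, scales[12] packing 8 pairs of
// 6-bit (scale, min) for the 8 sub-blocks of 32 values, and qs[QK_K/2] holding two 4-bit values per byte.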
  1931. void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
  1932. assert(k % QK_K == 0);
  1933. const int nb = k / QK_K;
  1934. uint8_t L[QK_K];
  1935. uint8_t Laux[32];
  1936. float weights[32];
  1937. float mins[QK_K/32];
  1938. float scales[QK_K/32];
  1939. for (int i = 0; i < nb; i++) {
float max_scale = 0; // since we subtract the min, scales are always positive
  1941. float max_min = 0;
  1942. for (int j = 0; j < QK_K/32; ++j) {
  1943. //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  1944. float sum_x2 = 0;
  1945. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  1946. float av_x = sqrtf(sum_x2/32);
  1947. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1948. scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
  1949. float scale = scales[j];
  1950. if (scale > max_scale) {
  1951. max_scale = scale;
  1952. }
  1953. float min = mins[j];
  1954. if (min > max_min) {
  1955. max_min = min;
  1956. }
  1957. }
  1958. #if QK_K == 256
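// 6-bit scale/min packing, the inverse of get_scale_min_k4: for j < 4, byte j stores scale j and
// byte j+4 stores min j; for j >= 4, byte j+4 stores the low 4 bits of scale j (low nibble) and of
// min j (high nibble), while their top 2 bits land in the top 2 bits of bytes j-4 and j.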
  1959. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  1960. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  1961. for (int j = 0; j < QK_K/32; ++j) {
  1962. uint8_t ls = nearest_int(inv_scale*scales[j]);
  1963. uint8_t lm = nearest_int(inv_min*mins[j]);
  1964. ls = MIN(63, ls);
  1965. lm = MIN(63, lm);
  1966. if (j < 4) {
  1967. y[i].scales[j] = ls;
  1968. y[i].scales[j+4] = lm;
  1969. } else {
  1970. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1971. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1972. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1973. }
  1974. }
  1975. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  1976. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  1977. uint8_t sc, m;
  1978. for (int j = 0; j < QK_K/32; ++j) {
  1979. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1980. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1981. if (!d) continue;
  1982. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1983. for (int ii = 0; ii < 32; ++ii) {
  1984. int l = nearest_int((x[32*j + ii] + dm)/d);
  1985. l = MAX(0, MIN(15, l));
  1986. L[32*j + ii] = l;
  1987. }
  1988. }
  1989. #else
  1990. const float s_factor = 15.f;
  1991. float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
  1992. float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
  1993. int d1 = nearest_int(inv_scale*scales[0]);
  1994. int m1 = nearest_int(inv_min*mins[0]);
  1995. int d2 = nearest_int(inv_scale*scales[1]);
  1996. int m2 = nearest_int(inv_min*mins[1]);
  1997. y[i].scales[0] = d1 | (m1 << 4);
  1998. y[i].scales[1] = d2 | (m2 << 4);
  1999. y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
  2000. y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
  2001. float sumlx = 0;
  2002. int suml2 = 0;
  2003. for (int j = 0; j < QK_K/32; ++j) {
  2004. const uint8_t sd = y[i].scales[j] & 0xF;
  2005. const uint8_t sm = y[i].scales[j] >> 4;
  2006. const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
  2007. if (!d) continue;
  2008. const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
  2009. for (int ii = 0; ii < 32; ++ii) {
  2010. int l = nearest_int((x[32*j + ii] + m)/d);
  2011. l = MAX(0, MIN(15, l));
  2012. L[32*j + ii] = l;
  2013. sumlx += (x[32*j + ii] + m)*l*sd;
  2014. suml2 += l*l*sd*sd;
  2015. }
  2016. }
  2017. if (suml2) {
  2018. y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
  2019. }
  2020. #endif
  2021. uint8_t * q = y[i].qs;
  2022. for (int j = 0; j < QK_K; j += 64) {
  2023. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  2024. q += 32;
  2025. }
  2026. x += QK_K;
  2027. }
  2028. }
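// Dequantization applies the per-sub-block affine map y = (d*sc)*q - (dmin*m), where q is the
// stored 4-bit value and sc, m are the 6-bit sub-block scale and min.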
  2029. void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
  2030. assert(k % QK_K == 0);
  2031. const int nb = k / QK_K;
  2032. for (int i = 0; i < nb; i++) {
  2033. const uint8_t * q = x[i].qs;
  2034. #if QK_K == 256
  2035. const float d = GGML_FP16_TO_FP32(x[i].d);
  2036. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  2037. int is = 0;
  2038. uint8_t sc, m;
  2039. for (int j = 0; j < QK_K; j += 64) {
  2040. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  2041. const float d1 = d * sc; const float m1 = min * m;
  2042. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  2043. const float d2 = d * sc; const float m2 = min * m;
  2044. for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
  2045. for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
  2046. q += 32; is += 2;
  2047. }
  2048. #else
  2049. const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
  2050. const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
  2051. const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
  2052. const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
  2053. for (int l = 0; l < 32; ++l) {
  2054. y[l+ 0] = d1 * (q[l] & 0xF) - m1;
  2055. y[l+32] = d2 * (q[l] >> 4) - m2;
  2056. }
  2057. y += QK_K;
  2058. #endif
  2059. }
  2060. }
  2061. void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
  2062. assert(k % QK_K == 0);
  2063. block_q4_K * restrict y = vy;
  2064. quantize_row_q4_K_reference(x, y, k);
  2065. }
  2066. static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int n_per_row, const float * quant_weights) {
  2067. #if QK_K != 256
  2068. (void)quant_weights;
  2069. quantize_row_q4_K_reference(x, y, n_per_row);
  2070. #else
  2071. assert(n_per_row % QK_K == 0);
  2072. const int nb = n_per_row / QK_K;
  2073. uint8_t L[QK_K];
  2074. uint8_t Laux[32];
  2075. uint8_t Ls[QK_K/32];
  2076. uint8_t Lm[QK_K/32];
  2077. float weights[32];
  2078. float sw[QK_K/32];
  2079. float mins[QK_K/32];
  2080. float scales[QK_K/32];
  2081. for (int i = 0; i < nb; i++) {
  2082. float sum_x2 = 0;
  2083. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2084. float sigma2 = 2*sum_x2/QK_K;
  2085. float av_x = sqrtf(sigma2);
  2086. for (int j = 0; j < QK_K/32; ++j) {
  2087. if (quant_weights) {
  2088. const float * qw = quant_weights + QK_K*i + 32*j;
  2089. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2090. } else {
  2091. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2092. }
  2093. float sumw = 0;
  2094. for (int l = 0; l < 32; ++l) sumw += weights[l];
  2095. sw[j] = sumw;
  2096. scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2097. }
  2098. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  2099. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  2100. for (int j = 0; j < QK_K/32; ++j) {
  2101. uint8_t ls = Ls[j];
  2102. uint8_t lm = Lm[j];
  2103. if (j < 4) {
  2104. y[i].scales[j] = ls;
  2105. y[i].scales[j+4] = lm;
  2106. } else {
  2107. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2108. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2109. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2110. }
  2111. }
  2112. y[i].d = GGML_FP32_TO_FP16(d_block);
  2113. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  2114. uint8_t sc, m;
  2115. for (int j = 0; j < QK_K/32; ++j) {
  2116. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2117. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2118. if (!d) continue;
  2119. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2120. for (int ii = 0; ii < 32; ++ii) {
  2121. int l = nearest_int((x[32*j + ii] + dm)/d);
  2122. l = MAX(0, MIN(15, l));
  2123. L[32*j + ii] = l;
  2124. }
  2125. }
  2126. uint8_t * q = y[i].qs;
  2127. for (int j = 0; j < QK_K; j += 64) {
  2128. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  2129. q += 32;
  2130. }
  2131. x += QK_K;
  2132. }
  2133. #endif
  2134. }
  2135. size_t quantize_q4_K(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2136. size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
  2137. if (!quant_weights) {
  2138. quantize_row_q4_K_reference(src, dst, nrow*n_per_row);
  2139. }
  2140. else {
  2141. char * qrow = (char *)dst;
  2142. for (int row = 0; row < nrow; ++row) {
  2143. quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
  2144. src += n_per_row;
  2145. qrow += row_size;
  2146. }
  2147. }
  2148. return nrow * row_size;
  2149. }
  2150. // ====================== 5-bit (de)-quantization
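// block_q5_K (QK_K == 256) extends q4_K with qh[QK_K/8]: the low 4 bits of each value live in qs
// and the 5th bit in qh; scales and mins use the same 6-bit packing as q4_K.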
  2151. void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
  2152. assert(k % QK_K == 0);
  2153. const int nb = k / QK_K;
  2154. #if QK_K == 256
  2155. uint8_t L[QK_K];
  2156. float mins[QK_K/32];
  2157. float scales[QK_K/32];
  2158. float weights[32];
  2159. uint8_t Laux[32];
  2160. #else
  2161. int8_t L[QK_K];
  2162. float scales[QK_K/16];
  2163. #endif
  2164. for (int i = 0; i < nb; i++) {
  2165. #if QK_K == 256
float max_scale = 0; // since we subtract the min, scales are always positive
  2167. float max_min = 0;
  2168. for (int j = 0; j < QK_K/32; ++j) {
  2169. //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  2170. float sum_x2 = 0;
  2171. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  2172. float av_x = sqrtf(sum_x2/32);
  2173. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2174. scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
  2175. float scale = scales[j];
  2176. if (scale > max_scale) {
  2177. max_scale = scale;
  2178. }
  2179. float min = mins[j];
  2180. if (min > max_min) {
  2181. max_min = min;
  2182. }
  2183. }
  2184. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  2185. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  2186. for (int j = 0; j < QK_K/32; ++j) {
  2187. uint8_t ls = nearest_int(inv_scale*scales[j]);
  2188. uint8_t lm = nearest_int(inv_min*mins[j]);
  2189. ls = MIN(63, ls);
  2190. lm = MIN(63, lm);
  2191. if (j < 4) {
  2192. y[i].scales[j] = ls;
  2193. y[i].scales[j+4] = lm;
  2194. } else {
  2195. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2196. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2197. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2198. }
  2199. }
  2200. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  2201. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  2202. uint8_t sc, m;
  2203. for (int j = 0; j < QK_K/32; ++j) {
  2204. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2205. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2206. if (!d) continue;
  2207. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2208. for (int ii = 0; ii < 32; ++ii) {
  2209. int l = nearest_int((x[32*j + ii] + dm)/d);
  2210. l = MAX(0, MIN(31, l));
  2211. L[32*j + ii] = l;
  2212. }
  2213. }
  2214. uint8_t * restrict qh = y[i].qh;
  2215. uint8_t * restrict ql = y[i].qs;
  2216. memset(qh, 0, QK_K/8);
  2217. uint8_t m1 = 1, m2 = 2;
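// High-bit packing: within each 64-value chunk, the 5th bit of element n+j goes to bit m1 of
// qh[j] and that of element n+j+32 to bit m2; m1/m2 advance by two bit positions per chunk.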
  2218. for (int n = 0; n < QK_K; n += 64) {
  2219. for (int j = 0; j < 32; ++j) {
  2220. int l1 = L[n + j];
  2221. if (l1 > 15) {
  2222. l1 -= 16; qh[j] |= m1;
  2223. }
  2224. int l2 = L[n + j + 32];
  2225. if (l2 > 15) {
  2226. l2 -= 16; qh[j] |= m2;
  2227. }
  2228. ql[j] = l1 | (l2 << 4);
  2229. }
  2230. m1 <<= 2; m2 <<= 2;
  2231. ql += 32;
  2232. }
  2233. #else
  2234. float max_scale = 0, amax = 0;
  2235. for (int j = 0; j < QK_K/16; ++j) {
  2236. scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL);
  2237. float abs_scale = fabsf(scales[j]);
  2238. if (abs_scale > amax) {
  2239. amax = abs_scale;
  2240. max_scale = scales[j];
  2241. }
  2242. }
  2243. float iscale = -128.f/max_scale;
  2244. for (int j = 0; j < QK_K/16; ++j) {
  2245. int l = nearest_int(iscale*scales[j]);
  2246. y[i].scales[j] = MAX(-128, MIN(127, l));
  2247. }
  2248. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2249. for (int j = 0; j < QK_K/16; ++j) {
  2250. const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2251. if (!d) continue;
  2252. for (int ii = 0; ii < 16; ++ii) {
  2253. int l = nearest_int(x[16*j + ii]/d);
  2254. l = MAX(-16, MIN(15, l));
  2255. L[16*j + ii] = l + 16;
  2256. }
  2257. }
  2258. uint8_t * restrict qh = y[i].qh;
  2259. uint8_t * restrict ql = y[i].qs;
  2260. memset(qh, 0, QK_K/8);
  2261. for (int j = 0; j < 32; ++j) {
  2262. int jm = j%8;
  2263. int is = j/8;
  2264. int l1 = L[j];
  2265. if (l1 > 15) {
  2266. l1 -= 16; qh[jm] |= (1 << is);
  2267. }
  2268. int l2 = L[j + 32];
  2269. if (l2 > 15) {
  2270. l2 -= 16; qh[jm] |= (1 << (4 + is));
  2271. }
  2272. ql[j] = l1 | (l2 << 4);
  2273. }
  2274. #endif
  2275. x += QK_K;
  2276. }
  2277. }
  2278. void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
  2279. assert(k % QK_K == 0);
  2280. const int nb = k / QK_K;
  2281. for (int i = 0; i < nb; i++) {
  2282. const uint8_t * ql = x[i].qs;
  2283. const uint8_t * qh = x[i].qh;
  2284. #if QK_K == 256
  2285. const float d = GGML_FP16_TO_FP32(x[i].d);
  2286. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  2287. int is = 0;
  2288. uint8_t sc, m;
  2289. uint8_t u1 = 1, u2 = 2;
  2290. for (int j = 0; j < QK_K; j += 64) {
  2291. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  2292. const float d1 = d * sc; const float m1 = min * m;
  2293. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  2294. const float d2 = d * sc; const float m2 = min * m;
  2295. for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
  2296. for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
  2297. ql += 32; is += 2;
  2298. u1 <<= 2; u2 <<= 2;
  2299. }
  2300. #else
  2301. float d = GGML_FP16_TO_FP32(x[i].d);
  2302. const int8_t * restrict s = x[i].scales;
  2303. for (int l = 0; l < 8; ++l) {
  2304. y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
  2305. y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
  2306. y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
  2307. y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
  2308. y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
  2309. y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
  2310. y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
  2311. y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
  2312. }
  2313. y += QK_K;
  2314. #endif
  2315. }
  2316. }
  2317. void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
  2318. assert(k % QK_K == 0);
  2319. block_q5_K * restrict y = vy;
  2320. quantize_row_q5_K_reference(x, y, k);
  2321. }
  2322. static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int n_per_row, const float * quant_weights) {
  2323. #if QK_K != 256
  2324. (void)quant_weights;
  2325. quantize_row_q5_K_reference(x, y, n_per_row);
  2326. #else
  2327. assert(n_per_row % QK_K == 0);
  2328. const int nb = n_per_row / QK_K;
  2329. uint8_t L[QK_K];
  2330. uint8_t Laux[32];
  2331. uint8_t Ls[QK_K/32];
  2332. uint8_t Lm[QK_K/32];
  2333. float mins[QK_K/32];
  2334. float scales[QK_K/32];
  2335. float sw[QK_K/32];
  2336. float weights[32];
  2337. for (int i = 0; i < nb; i++) {
  2338. float sum_x2 = 0;
  2339. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2340. float sigma2 = 2*sum_x2/QK_K;
  2341. float av_x = sqrtf(sigma2);
  2342. for (int j = 0; j < QK_K/32; ++j) {
  2343. if (quant_weights) {
  2344. const float * qw = quant_weights + QK_K*i + 32*j;
  2345. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2346. } else {
  2347. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2348. }
  2349. float sumw = 0;
  2350. for (int l = 0; l < 32; ++l) sumw += weights[l];
  2351. sw[j] = sumw;
  2352. scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2353. }
  2354. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  2355. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  2356. for (int j = 0; j < QK_K/32; ++j) {
  2357. uint8_t ls = Ls[j];
  2358. uint8_t lm = Lm[j];
  2359. ls = MIN(63, ls);
  2360. lm = MIN(63, lm);
  2361. if (j < 4) {
  2362. y[i].scales[j] = ls;
  2363. y[i].scales[j+4] = lm;
  2364. } else {
  2365. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2366. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2367. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2368. }
  2369. }
  2370. y[i].d = GGML_FP32_TO_FP16(d_block);
  2371. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  2372. uint8_t sc, m;
  2373. for (int j = 0; j < QK_K/32; ++j) {
  2374. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2375. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2376. if (!d) continue;
  2377. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2378. for (int ii = 0; ii < 32; ++ii) {
  2379. int l = nearest_int((x[32*j + ii] + dm)/d);
  2380. l = MAX(0, MIN(31, l));
  2381. L[32*j + ii] = l;
  2382. }
  2383. }
  2384. uint8_t * restrict qh = y[i].qh;
  2385. uint8_t * restrict ql = y[i].qs;
  2386. memset(qh, 0, QK_K/8);
  2387. uint8_t m1 = 1, m2 = 2;
  2388. for (int n = 0; n < QK_K; n += 64) {
  2389. for (int j = 0; j < 32; ++j) {
  2390. int l1 = L[n + j];
  2391. if (l1 > 15) {
  2392. l1 -= 16; qh[j] |= m1;
  2393. }
  2394. int l2 = L[n + j + 32];
  2395. if (l2 > 15) {
  2396. l2 -= 16; qh[j] |= m2;
  2397. }
  2398. ql[j] = l1 | (l2 << 4);
  2399. }
  2400. m1 <<= 2; m2 <<= 2;
  2401. ql += 32;
  2402. }
  2403. x += QK_K;
  2404. }
  2405. #endif
  2406. }
  2407. size_t quantize_q5_K(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2408. size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
  2409. if (!quant_weights) {
  2410. quantize_row_q5_K_reference(src, dst, nrow*n_per_row);
  2411. }
  2412. else {
  2413. char * qrow = (char *)dst;
  2414. for (int row = 0; row < nrow; ++row) {
  2415. quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
  2416. src += n_per_row;
  2417. qrow += row_size;
  2418. }
  2419. }
  2420. return nrow * row_size;
  2421. }
  2422. // ====================== 6-bit (de)-quantization
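// block_q6_K (QK_K == 256): ql[QK_K/2] holds the low 4 bits, qh[QK_K/4] the upper 2 bits,
// scales[QK_K/16] are signed 8-bit sub-block scales, and d is the fp16 super-block scale;
// quantized values span [-32, 31].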
  2423. void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
  2424. assert(k % QK_K == 0);
  2425. const int nb = k / QK_K;
  2426. int8_t L[QK_K];
  2427. float scales[QK_K/16];
  2428. for (int i = 0; i < nb; i++) {
  2429. float max_scale = 0;
  2430. float max_abs_scale = 0;
  2431. for (int ib = 0; ib < QK_K/16; ++ib) {
  2432. const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2433. scales[ib] = scale;
  2434. const float abs_scale = fabsf(scale);
  2435. if (abs_scale > max_abs_scale) {
  2436. max_abs_scale = abs_scale;
  2437. max_scale = scale;
  2438. }
  2439. }
  2440. if (!max_abs_scale) {
  2441. memset(&y[i], 0, sizeof(block_q6_K));
  2442. y[i].d = GGML_FP32_TO_FP16(0.f);
  2443. x += QK_K;
  2444. continue;
  2445. }
  2446. float iscale = -128.f/max_scale;
  2447. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2448. for (int ib = 0; ib < QK_K/16; ++ib) {
  2449. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2450. }
  2451. for (int j = 0; j < QK_K/16; ++j) {
  2452. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2453. if (!d) {
  2454. continue;
  2455. }
  2456. for (int ii = 0; ii < 16; ++ii) {
  2457. int l = nearest_int(x[16*j + ii]/d);
  2458. l = MAX(-32, MIN(31, l));
  2459. L[16*j + ii] = l + 32;
  2460. }
  2461. }
  2462. uint8_t * restrict ql = y[i].ql;
  2463. uint8_t * restrict qh = y[i].qh;
  2464. #if QK_K == 256
  2465. for (int j = 0; j < QK_K; j += 128) {
  2466. for (int l = 0; l < 32; ++l) {
  2467. const uint8_t q1 = L[j + l + 0] & 0xF;
  2468. const uint8_t q2 = L[j + l + 32] & 0xF;
  2469. const uint8_t q3 = L[j + l + 64] & 0xF;
  2470. const uint8_t q4 = L[j + l + 96] & 0xF;
  2471. ql[l+ 0] = q1 | (q3 << 4);
  2472. ql[l+32] = q2 | (q4 << 4);
  2473. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2474. }
  2475. ql += 64;
  2476. qh += 32;
  2477. }
  2478. #else
  2479. for (int l = 0; l < 32; ++l) {
  2480. const uint8_t q1 = L[l + 0] & 0xF;
  2481. const uint8_t q2 = L[l + 32] & 0xF;
  2482. ql[l] = q1 | (q2 << 4);
  2483. }
  2484. for (int l = 0; l < 16; ++l) {
  2485. qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
  2486. }
  2487. #endif
  2488. x += QK_K;
  2489. }
  2490. }
  2491. void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
  2492. assert(k % QK_K == 0);
  2493. const int nb = k / QK_K;
  2494. for (int i = 0; i < nb; i++) {
  2495. const float d = GGML_FP16_TO_FP32(x[i].d);
  2496. const uint8_t * restrict ql = x[i].ql;
  2497. const uint8_t * restrict qh = x[i].qh;
  2498. const int8_t * restrict sc = x[i].scales;
  2499. #if QK_K == 256
  2500. for (int n = 0; n < QK_K; n += 128) {
  2501. for (int l = 0; l < 32; ++l) {
  2502. int is = l/16;
  2503. const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2504. const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2505. const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2506. const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2507. y[l + 0] = d * sc[is + 0] * q1;
  2508. y[l + 32] = d * sc[is + 2] * q2;
  2509. y[l + 64] = d * sc[is + 4] * q3;
  2510. y[l + 96] = d * sc[is + 6] * q4;
  2511. }
  2512. y += 128;
  2513. ql += 64;
  2514. qh += 32;
  2515. sc += 8;
  2516. }
  2517. #else
  2518. for (int l = 0; l < 16; ++l) {
  2519. const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2520. const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2521. const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2522. const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2523. y[l+ 0] = d * sc[0] * q1;
  2524. y[l+16] = d * sc[1] * q2;
  2525. y[l+32] = d * sc[2] * q3;
  2526. y[l+48] = d * sc[3] * q4;
  2527. }
  2528. y += 64;
  2529. #endif
  2530. }
  2531. }
  2532. void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
  2533. assert(k % QK_K == 0);
  2534. block_q6_K * restrict y = vy;
  2535. quantize_row_q6_K_reference(x, y, k);
  2536. }
  2537. static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int n_per_row, const float * quant_weights) {
  2538. #if QK_K != 256
  2539. (void)quant_weights;
  2540. quantize_row_q6_K_reference(x, y, n_per_row);
  2541. #else
  2542. assert(n_per_row % QK_K == 0);
  2543. const int nb = n_per_row / QK_K;
  2544. int8_t L[QK_K];
  2545. float scales[QK_K/16];
  2546. //float weights[16];
  2547. for (int i = 0; i < nb; i++) {
  2548. //float sum_x2 = 0;
  2549. //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
  2550. //float sigma2 = sum_x2/QK_K;
  2551. float max_scale = 0;
  2552. float max_abs_scale = 0;
  2553. for (int ib = 0; ib < QK_K/16; ++ib) {
  2554. float scale;
  2555. if (quant_weights) {
  2556. const float * qw = quant_weights + QK_K*i + 16*ib;
  2557. //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
  2558. //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
  2559. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
  2560. } else {
  2561. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2562. }
  2563. scales[ib] = scale;
  2564. const float abs_scale = fabsf(scale);
  2565. if (abs_scale > max_abs_scale) {
  2566. max_abs_scale = abs_scale;
  2567. max_scale = scale;
  2568. }
  2569. }
  2570. if (!max_abs_scale) {
  2571. memset(&y[i], 0, sizeof(block_q6_K));
  2572. y[i].d = GGML_FP32_TO_FP16(0.f);
  2573. x += QK_K;
  2574. continue;
  2575. }
  2576. float iscale = -128.f/max_scale;
  2577. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2578. for (int ib = 0; ib < QK_K/16; ++ib) {
  2579. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2580. }
  2581. for (int j = 0; j < QK_K/16; ++j) {
  2582. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2583. if (!d) {
  2584. continue;
  2585. }
  2586. for (int ii = 0; ii < 16; ++ii) {
  2587. int l = nearest_int(x[16*j + ii]/d);
  2588. l = MAX(-32, MIN(31, l));
  2589. L[16*j + ii] = l + 32;
  2590. }
  2591. }
  2592. uint8_t * restrict ql = y[i].ql;
  2593. uint8_t * restrict qh = y[i].qh;
  2594. for (int j = 0; j < QK_K; j += 128) {
  2595. for (int l = 0; l < 32; ++l) {
  2596. const uint8_t q1 = L[j + l + 0] & 0xF;
  2597. const uint8_t q2 = L[j + l + 32] & 0xF;
  2598. const uint8_t q3 = L[j + l + 64] & 0xF;
  2599. const uint8_t q4 = L[j + l + 96] & 0xF;
  2600. ql[l+ 0] = q1 | (q3 << 4);
  2601. ql[l+32] = q2 | (q4 << 4);
  2602. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2603. }
  2604. ql += 64;
  2605. qh += 32;
  2606. }
  2607. x += QK_K;
  2608. }
  2609. #endif
  2610. }
  2611. size_t quantize_q6_K(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2612. size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
  2613. if (!quant_weights) {
  2614. quantize_row_q6_K_reference(src, dst, nrow*n_per_row);
  2615. }
  2616. else {
  2617. char * qrow = (char *)dst;
  2618. for (int row = 0; row < nrow; ++row) {
  2619. quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
  2620. src += n_per_row;
  2621. qrow += row_size;
  2622. }
  2623. }
  2624. return nrow * row_size;
  2625. }
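// Importance-weighted variants of the legacy 32-value block formats (Q4_0/Q4_1/Q5_0/Q5_1) follow.
// Per-value weights are qw[j]*sqrt(sigma2 + x[j]^2), with sigma2 averaged over the whole row;
// without quant_weights the reference quantizers are used instead.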
  2626. static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int n_per_row, const float * quant_weights) {
  2627. static_assert(QK4_0 == 32, "QK4_0 must be 32");
  2628. if (!quant_weights) {
  2629. quantize_row_q4_0_reference(x, y, n_per_row);
  2630. return;
  2631. }
  2632. float weight[QK4_0];
  2633. int8_t L[QK4_0];
  2634. float sum_x2 = 0;
  2635. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2636. float sigma2 = sum_x2/n_per_row;
  2637. const int nb = n_per_row/QK4_0;
  2638. for (int ib = 0; ib < nb; ++ib) {
  2639. const float * xb = x + QK4_0 * ib;
  2640. const float * qw = quant_weights + QK4_0 * ib;
  2641. for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2642. float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
  2643. y[ib].d = GGML_FP32_TO_FP16(d);
  2644. for (int j = 0; j < 16; ++j) {
  2645. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2646. }
  2647. }
  2648. }
  2649. size_t quantize_q4_0(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2650. if (!quant_weights) {
  2651. quantize_row_q4_0_reference(src, dst, nrow*n_per_row);
  2652. return nrow * ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  2653. }
  2654. size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  2655. char * qrow = (char *)dst;
  2656. for (int row = 0; row < nrow; ++row) {
  2657. quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
  2658. src += n_per_row;
  2659. qrow += row_size;
  2660. }
  2661. return nrow * row_size;
  2662. }
  2663. static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int n_per_row, const float * quant_weights) {
  2664. static_assert(QK4_1 == 32, "QK4_1 must be 32");
  2665. if (!quant_weights) {
  2666. quantize_row_q4_1_reference(x, y, n_per_row);
  2667. return;
  2668. }
  2669. float weight[QK4_1];
  2670. uint8_t L[QK4_1], Laux[QK4_1];
  2671. float sum_x2 = 0;
  2672. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2673. float sigma2 = sum_x2/n_per_row;
  2674. const int nb = n_per_row/QK4_1;
  2675. for (int ib = 0; ib < nb; ++ib) {
  2676. const float * xb = x + QK4_1 * ib;
  2677. const float * qw = quant_weights + QK4_1 * ib;
  2678. for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2679. float min;
  2680. float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2681. y[ib].d = GGML_FP32_TO_FP16(d);
  2682. y[ib].m = GGML_FP32_TO_FP16(-min);
  2683. for (int j = 0; j < 16; ++j) {
  2684. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2685. }
  2686. }
  2687. }
  2688. size_t quantize_q4_1(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2689. if (!quant_weights) {
  2690. quantize_row_q4_1_reference(src, dst, nrow*n_per_row);
  2691. return nrow * ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  2692. }
  2693. size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  2694. char * qrow = (char *)dst;
  2695. for (int row = 0; row < nrow; ++row) {
  2696. quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
  2697. src += n_per_row;
  2698. qrow += row_size;
  2699. }
  2700. return nrow * row_size;
  2701. }
  2702. static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int n_per_row, const float * quant_weights) {
  2703. static_assert(QK5_0 == 32, "QK5_0 must be 32");
  2704. if (!quant_weights) {
  2705. quantize_row_q5_0_reference(x, y, n_per_row);
  2706. return;
  2707. }
  2708. float weight[QK5_0];
  2709. int8_t L[QK5_0];
  2710. float sum_x2 = 0;
  2711. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2712. float sigma2 = sum_x2/n_per_row;
  2713. const int nb = n_per_row/QK5_0;
  2714. for (int ib = 0; ib < nb; ++ib) {
  2715. const float * xb = x + QK5_0 * ib;
  2716. const float * qw = quant_weights + QK5_0 * ib;
  2717. for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2718. float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
  2719. y[ib].d = GGML_FP32_TO_FP16(d);
  2720. uint32_t qh = 0;
  2721. for (int j = 0; j < 16; ++j) {
  2722. const uint8_t xi0 = L[j];
  2723. const uint8_t xi1 = L[j+16];
  2724. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  2725. // get the 5-th bit and store it in qh at the right position
  2726. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  2727. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
  2728. }
  2729. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2730. }
  2731. }
  2732. size_t quantize_q5_0(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2733. if (!quant_weights) {
  2734. quantize_row_q5_0_reference(src, dst, nrow*n_per_row);
  2735. return nrow * ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  2736. }
  2737. size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  2738. char * qrow = (char *)dst;
  2739. for (int row = 0; row < nrow; ++row) {
  2740. quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
  2741. src += n_per_row;
  2742. qrow += row_size;
  2743. }
  2744. return nrow * row_size;
  2745. }
  2746. static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int n_per_row, const float * quant_weights) {
  2747. static_assert(QK5_1 == 32, "QK5_1 must be 32");
  2748. if (!quant_weights) {
  2749. quantize_row_q5_1_reference(x, y, n_per_row);
  2750. return;
  2751. }
  2752. float weight[QK5_1];
  2753. uint8_t L[QK5_1], Laux[QK5_1];
  2754. float sum_x2 = 0;
  2755. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2756. float sigma2 = sum_x2/n_per_row;
  2757. const int nb = n_per_row/QK5_1;
  2758. for (int ib = 0; ib < nb; ++ib) {
  2759. const float * xb = x + QK5_1 * ib;
  2760. const float * qw = quant_weights + QK5_1 * ib;
  2761. for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2762. float min;
  2763. float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2764. y[ib].d = GGML_FP32_TO_FP16(d);
  2765. y[ib].m = GGML_FP32_TO_FP16(-min);
  2766. uint32_t qh = 0;
  2767. for (int j = 0; j < 16; ++j) {
  2768. const uint8_t xi0 = L[j];
  2769. const uint8_t xi1 = L[j+16];
  2770. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  2771. // get the 5-th bit and store it in qh at the right position
  2772. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
  2774. }
  2775. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2776. }
  2777. }
  2778. size_t quantize_q5_1(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2779. if (!quant_weights) {
  2780. quantize_row_q5_1_reference(src, dst, nrow*n_per_row);
  2781. return nrow * ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  2782. }
  2783. size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  2784. char * qrow = (char *)dst;
  2785. for (int row = 0; row < nrow; ++row) {
  2786. quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
  2787. src += n_per_row;
  2788. qrow += row_size;
  2789. }
  2790. return nrow * row_size;
  2791. }
  2792. size_t quantize_q8_0(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  2793. (void)quant_weights; // not used
  2794. const size_t row_size = ggml_row_size(GGML_TYPE_Q8_0, n_per_row);
  2795. quantize_row_q8_0_reference(src, dst, nrow*n_per_row);
  2796. return nrow * row_size;
  2797. }
  2798. // ====================== "True" 2-bit (de)-quantization
  2799. void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k) {
  2800. assert(k % QK_K == 0);
  2801. const int nb = k / QK_K;
  2802. uint32_t aux32[2];
  2803. const uint8_t * aux8 = (const uint8_t *)aux32;
  2804. for (int i = 0; i < nb; i++) {
  2805. const float d = GGML_FP16_TO_FP32(x[i].d);
  2806. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  2807. memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
  2808. const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
  2809. for (int l = 0; l < 4; ++l) {
  2810. const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
  2811. const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
  2812. for (int j = 0; j < 8; ++j) {
  2813. y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  2814. }
  2815. y += 8;
  2816. }
  2817. }
  2818. }
  2819. }
  2820. // ====================== 2.3125 bpw (de)-quantization
  2821. void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int k) {
  2822. assert(k % QK_K == 0);
  2823. const int nb = k / QK_K;
  2824. float db[2];
  2825. for (int i = 0; i < nb; i++) {
  2826. const float d = GGML_FP16_TO_FP32(x[i].d);
  2827. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  2828. db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
  2829. db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
  2830. for (int l = 0; l < 4; ++l) {
  2831. const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
  2832. const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
  2833. for (int j = 0; j < 8; ++j) {
  2834. y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  2835. }
  2836. y += 8;
  2837. }
  2838. }
  2839. }
  2840. }
  2841. // ====================== 2.5625 bpw (de)-quantization
  2842. void dequantize_row_iq2_s(const block_iq2_s * restrict x, float * restrict y, int k) {
  2843. assert(k % QK_K == 0);
  2844. const int nb = k / QK_K;
  2845. float db[2];
  2846. for (int i = 0; i < nb; i++) {
  2847. const float d = GGML_FP16_TO_FP32(x[i].d);
  2848. const uint8_t * qs = x[i].qs;
  2849. const uint8_t * qh = x[i].qh;
  2850. const uint8_t * signs = qs + QK_K/8;
  2851. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  2852. db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
  2853. db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
  2854. for (int l = 0; l < 4; ++l) {
  2855. const float dl = db[l/2];
  2856. const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
  2857. for (int j = 0; j < 8; ++j) {
  2858. y[j] = dl * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1.f : 1.f);
  2859. }
  2860. y += 8;
  2861. }
  2862. qs += 4;
  2863. signs += 4;
  2864. }
  2865. }
  2866. }
  2867. // ====================== 3.0625 bpw (de)-quantization
  2868. void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int k) {
  2869. assert(k % QK_K == 0);
  2870. const int nb = k / QK_K;
  2871. uint32_t aux32;
  2872. for (int i = 0; i < nb; i++) {
  2873. const float d = GGML_FP16_TO_FP32(x[i].d);
  2874. const uint8_t * qs = x[i].qs;
  2875. const uint8_t * scales_and_signs = qs + QK_K/4;
  2876. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  2877. memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
  2878. const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
  2879. for (int l = 0; l < 4; ++l) {
  2880. const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
  2881. const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
  2882. const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
  2883. for (int j = 0; j < 4; ++j) {
  2884. y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
  2885. y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
  2886. }
  2887. y += 8;
  2888. }
  2889. qs += 8;
  2890. }
  2891. }
  2892. }
  2893. // ====================== 3.3125 bpw (de)-quantization
  2894. void dequantize_row_iq3_s(const block_iq3_s * restrict x, float * restrict y, int k) {
  2895. assert(k % QK_K == 0);
  2896. const int nb = k / QK_K;
  2897. for (int i = 0; i < nb; i++) {
  2898. const float d = GGML_FP16_TO_FP32(x[i].d);
  2899. const uint8_t * qs = x[i].qs;
  2900. const uint8_t * qh = x[i].qh;
  2901. const uint8_t * signs = x[i].signs;
  2902. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  2903. const float db1 = d * (1 + 2*(x[i].scales[ib32/2] & 0xf));
  2904. const float db2 = d * (1 + 2*(x[i].scales[ib32/2] >> 4));
  2905. for (int l = 0; l < 4; ++l) {
  2906. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[0] << (8-2*l)) & 256)));
  2907. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[0] << (7-2*l)) & 256)));
  2908. for (int j = 0; j < 4; ++j) {
  2909. y[j+0] = db1 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
  2910. y[j+4] = db1 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
  2911. }
  2912. y += 8;
  2913. }
  2914. qs += 8;
  2915. signs += 4;
  2916. for (int l = 0; l < 4; ++l) {
  2917. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[1] << (8-2*l)) & 256)));
  2918. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[1] << (7-2*l)) & 256)));
  2919. for (int j = 0; j < 4; ++j) {
  2920. y[j+0] = db2 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
  2921. y[j+4] = db2 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
  2922. }
  2923. y += 8;
  2924. }
  2925. qh += 2;
  2926. qs += 8;
  2927. signs += 4;
  2928. }
  2929. }
  2930. }
  2931. // ====================== 1.5625 bpw (de)-quantization
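// iq1_s: each 32-value group stores four grid indices (8 bits in qs plus 3 bits from qh), a 3-bit
// group scale in qh bits 12..14, and a sign bit (0x8000) selecting +/-IQ1S_DELTA, which is added
// to every value of the group.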
  2932. void dequantize_row_iq1_s(const block_iq1_s * restrict x, float * restrict y, int k) {
  2933. assert(k % QK_K == 0);
  2934. const int nb = k / QK_K;
  2935. for (int i = 0; i < nb; i++) {
  2936. const float d = GGML_FP16_TO_FP32(x[i].d);
  2937. const uint8_t * qs = x[i].qs;
  2938. const uint16_t * qh = x[i].qh;
  2939. for (int ib = 0; ib < QK_K/32; ++ib) {
  2940. const float dl = d * (2*((qh[ib] >> 12) & 7) + 1);
  2941. const float delta = qh[ib] & 0x8000 ? -IQ1S_DELTA : IQ1S_DELTA;
  2942. for (int l = 0; l < 4; ++l) {
  2943. const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
  2944. for (int j = 0; j < 8; ++j) {
  2945. y[j] = dl * (grid[j] + delta);
  2946. }
  2947. y += 8;
  2948. }
  2949. qs += 4;
  2950. }
  2951. }
  2952. }
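// iq4_nl / iq4_xs: 4-bit indices are mapped through the non-uniform kvalues_iq4nl table below
// rather than used directly; iq4_xs additionally carries 6-bit sub-block scales (and falls back
// to iq4_nl when QK_K == 64).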
  2953. static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
  2954. void dequantize_row_iq4_nl(const block_iq4_nl * restrict x, float * restrict y, int k) {
  2955. assert(k % QK4_NL == 0);
  2956. const int nb = k / QK4_NL;
  2957. for (int i = 0; i < nb; i++) {
  2958. const uint8_t * qs = x[i].qs;
  2959. const float d = GGML_FP16_TO_FP32(x[i].d);
  2960. for (int j = 0; j < QK4_NL/2; ++j) {
  2961. y[j+ 0] = d * kvalues_iq4nl[qs[j] & 0xf];
  2962. y[j+QK4_NL/2] = d * kvalues_iq4nl[qs[j] >> 4];
  2963. }
  2964. y += QK4_NL;
  2965. qs += QK4_NL/2;
  2966. }
  2967. }
  2968. void dequantize_row_iq4_xs(const block_iq4_xs * restrict x, float * restrict y, int k) {
  2969. assert(k % QK_K == 0);
  2970. #if QK_K == 64
  2971. dequantize_row_iq4_nl((const block_iq4_nl *)x, y, k);
  2972. #else
  2973. const int nb = k / QK_K;
  2974. for (int i = 0; i < nb; i++) {
  2975. const uint8_t * qs = x[i].qs;
  2976. const float d = GGML_FP16_TO_FP32(x[i].d);
  2977. for (int ib = 0; ib < QK_K/32; ++ib) {
  2978. const int ls = ((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4);
  2979. const float dl = d * (ls - 32);
  2980. for (int j = 0; j < 16; ++j) {
  2981. y[j+ 0] = dl * kvalues_iq4nl[qs[j] & 0xf];
  2982. y[j+16] = dl * kvalues_iq4nl[qs[j] >> 4];
  2983. }
  2984. y += 32;
  2985. qs += 16;
  2986. }
  2987. }
  2988. #endif
  2989. }
  2990. //===================================== Q8_K ==============================================
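// block_q8_K: one fp32 scale d, QK_K int8 quants, and bsums[QK_K/16] holding the sum of each
// 16-value group; the bsums let the K-quant dot products account for the per-sub-block mins
// without revisiting individual quants.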
  2991. void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
  2992. assert(k % QK_K == 0);
  2993. const int nb = k / QK_K;
  2994. for (int i = 0; i < nb; i++) {
  2995. float max = 0;
  2996. float amax = 0;
  2997. for (int j = 0; j < QK_K; ++j) {
  2998. float ax = fabsf(x[j]);
  2999. if (ax > amax) {
  3000. amax = ax; max = x[j];
  3001. }
  3002. }
  3003. if (!amax) {
  3004. y[i].d = 0;
  3005. memset(y[i].qs, 0, QK_K);
  3006. x += QK_K;
  3007. continue;
  3008. }
  3009. //const float iscale = -128.f/max;
  3010. // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward
  3011. const float iscale = -127.f/max;
  3012. for (int j = 0; j < QK_K; ++j) {
  3013. int v = nearest_int(iscale*x[j]);
  3014. y[i].qs[j] = MIN(127, v);
  3015. }
  3016. for (int j = 0; j < QK_K/16; ++j) {
  3017. int sum = 0;
  3018. for (int ii = 0; ii < 16; ++ii) {
  3019. sum += y[i].qs[j*16 + ii];
  3020. }
  3021. y[i].bsums[j] = sum;
  3022. }
  3023. y[i].d = 1/iscale;
  3024. x += QK_K;
  3025. }
  3026. }
  3027. void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
  3028. assert(k % QK_K == 0);
  3029. const int nb = k / QK_K;
  3030. for (int i = 0; i < nb; i++) {
  3031. for (int j = 0; j < QK_K; ++j) {
  3032. *y++ = x[i].d * x[i].qs[j];
  3033. }
  3034. }
  3035. }
  3036. void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
  3037. quantize_row_q8_K_reference(x, y, k);
  3038. }
//===================================== Dot products =================================
  3040. //
  3041. // Helper functions
  3042. //
  3043. #if __AVX__ || __AVX2__ || __AVX512F__
  3044. // shuffles to pick the required scales in dot products
  3045. static inline __m256i get_scale_shuffle_q3k(int i) {
  3046. static const uint8_t k_shuffle[128] = {
  3047. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  3048. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  3049. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  3050. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
  3051. };
  3052. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  3053. }
  3054. static inline __m256i get_scale_shuffle_k4(int i) {
  3055. static const uint8_t k_shuffle[256] = {
  3056. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
  3057. 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  3058. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
  3059. 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  3060. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
  3061. 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  3062. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
  3063. 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
  3064. };
  3065. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  3066. }
  3067. static inline __m128i get_scale_shuffle(int i) {
  3068. static const uint8_t k_shuffle[128] = {
  3069. 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
  3070. 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
  3071. 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
  3072. 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
  3073. 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
  3074. 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
  3075. 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
  3076. 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
  3077. };
  3078. return _mm_loadu_si128((const __m128i*)k_shuffle + i);
  3079. }
  3080. #endif
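// Dot product of a Q4_0 row with a Q8_0 row: each block contributes d_x*d_y*sum_j (q_x[j]-8)*q_y[j].
// When __ARM_FEATURE_MATMUL_INT8 is available and nrc == 2, two rows of x are processed against
// two rows of y at once using the i8mm vmmlaq_s32 path below.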
  3081. void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3082. const int qk = QK8_0;
  3083. const int nb = n / qk;
  3084. assert(n % qk == 0);
  3085. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3086. assert((nrc == 2) || (nrc == 1));
  3087. #else
  3088. assert(nrc == 1);
  3089. #endif
  3090. UNUSED(nrc);
  3091. UNUSED(bx);
  3092. UNUSED(by);
  3093. UNUSED(bs);
  3094. const block_q4_0 * restrict x = vx;
  3095. const block_q8_0 * restrict y = vy;
  3096. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3097. if (nrc == 2) {
  3098. const block_q4_0 * restrict vx0 = vx;
  3099. const block_q4_0 * restrict vx1 = vx + bx;
  3100. const block_q8_0 * restrict vy0 = vy;
  3101. const block_q8_0 * restrict vy1 = vy + by;
  3102. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3103. for (int i = 0; i < nb; i++) {
  3104. const block_q4_0 * restrict b_x0 = &vx0[i];
  3105. const block_q4_0 * restrict b_x1 = &vx1[i];
  3106. const block_q8_0 * restrict b_y0 = &vy0[i];
  3107. const block_q8_0 * restrict b_y1 = &vy1[i];
  3108. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3109. const int8x16_t s8b = vdupq_n_s8(0x8);
  3110. const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
  3111. const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
  3112. // 4-bit -> 8-bit
  3113. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3114. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3115. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3116. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3117. // sub 8
  3118. const int8x16_t x0_l = vsubq_s8(v0_0l, s8b);
  3119. const int8x16_t x0_h = vsubq_s8(v0_0h, s8b);
  3120. const int8x16_t x1_l = vsubq_s8(v0_1l, s8b);
  3121. const int8x16_t x1_h = vsubq_s8(v0_1h, s8b);
  3122. // load y
  3123. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  3124. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  3125. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  3126. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  3127. float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  3128. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  3129. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  3130. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  3131. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3132. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3133. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3134. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3135. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3136. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3137. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3138. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3139. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  3140. l1, r1)), l2, r2)), l3, r3))), scale);
  3141. }
  3142. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  3143. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  3144. vst1_f32(s, vget_low_f32(sumv2));
  3145. vst1_f32(s + bs, vget_high_f32(sumv2));
  3146. return;
  3147. }
  3148. #endif
  3149. #if defined(__ARM_NEON)
  3150. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3151. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3152. assert(nb % 2 == 0); // TODO: handle odd nb
  3153. for (int i = 0; i < nb; i += 2) {
  3154. const block_q4_0 * restrict x0 = &x[i + 0];
  3155. const block_q4_0 * restrict x1 = &x[i + 1];
  3156. const block_q8_0 * restrict y0 = &y[i + 0];
  3157. const block_q8_0 * restrict y1 = &y[i + 1];
  3158. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3159. const int8x16_t s8b = vdupq_n_s8(0x8);
  3160. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3161. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3162. // 4-bit -> 8-bit
  3163. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3164. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3165. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3166. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3167. // sub 8
  3168. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  3169. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  3170. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  3171. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  3172. // load y
  3173. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3174. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3175. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3176. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3177. // dot product into int32x4_t
  3178. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
  3179. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
  3180. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3181. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3182. }
  3183. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3184. #elif defined(__AVX2__)
  3185. // Initialize accumulator with zeros
  3186. __m256 acc = _mm256_setzero_ps();
  3187. // Main loop
  3188. for (int i = 0; i < nb; ++i) {
  3189. /* Compute combined scale for the block */
  3190. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3191. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3192. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  3193. const __m256i off = _mm256_set1_epi8( 8 );
  3194. qx = _mm256_sub_epi8( qx, off );
  3195. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3196. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  3197. /* Multiply q with scale and accumulate */
  3198. acc = _mm256_fmadd_ps( d, q, acc );
  3199. }
  3200. *s = hsum_float_8(acc);
  3201. #elif defined(__AVX__)
  3202. // Initialize accumulator with zeros
  3203. __m256 acc = _mm256_setzero_ps();
  3204. // Main loop
  3205. for (int i = 0; i < nb; ++i) {
  3206. // Compute combined scale for the block
  3207. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3208. const __m128i lowMask = _mm_set1_epi8(0xF);
  3209. const __m128i off = _mm_set1_epi8(8);
  3210. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  3211. __m128i bx_0 = _mm_and_si128(lowMask, tmp);
  3212. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  3213. bx_0 = _mm_sub_epi8(bx_0, off);
  3214. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3215. bx_0 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  3216. by_0 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3217. bx_0 = _mm_sub_epi8(bx_0, off);
  3218. const __m128i i32_1 = mul_sum_i8_pairs(bx_0, by_0);
  3219. // Convert int32_t to float
  3220. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  3221. // Apply the scale, and accumulate
  3222. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  3223. }
  3224. *s = hsum_float_8(acc);
  3225. #elif defined(__SSSE3__)
  3226. // set constants
  3227. const __m128i lowMask = _mm_set1_epi8(0xF);
  3228. const __m128i off = _mm_set1_epi8(8);
  3229. // Initialize accumulator with zeros
  3230. __m128 acc_0 = _mm_setzero_ps();
  3231. __m128 acc_1 = _mm_setzero_ps();
  3232. __m128 acc_2 = _mm_setzero_ps();
  3233. __m128 acc_3 = _mm_setzero_ps();
  3234. // First round without accumulation
  3235. {
  3236. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  3237. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
  3238. // Compute combined scale for the block 0 and 1
  3239. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  3240. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  3241. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3242. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  3243. bx_0 = _mm_sub_epi8(bx_0, off);
  3244. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3245. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3246. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  3247. bx_1 = _mm_sub_epi8(bx_1, off);
  3248. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3249. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  3250. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
  3251. // Compute combined scale for the block 2 and 3
  3252. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  3253. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  3254. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3255. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  3256. bx_2 = _mm_sub_epi8(bx_2, off);
  3257. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  3258. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  3259. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  3260. bx_3 = _mm_sub_epi8(bx_3, off);
  3261. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  3262. // Convert int32_t to float
  3263. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  3264. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  3265. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  3266. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  3267. // Apply the scale
  3268. acc_0 = _mm_mul_ps( d_0_1, p0 );
  3269. acc_1 = _mm_mul_ps( d_0_1, p1 );
  3270. acc_2 = _mm_mul_ps( d_2_3, p2 );
  3271. acc_3 = _mm_mul_ps( d_2_3, p3 );
  3272. }
  3273. assert(nb % 2 == 0); // TODO: handle odd nb
  3274. // Main loop
  3275. for (int i = 2; i < nb; i+=2) {
  3276. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  3277. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
  3278. // Compute combined scale for the block 0 and 1
  3279. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3280. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  3281. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3282. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  3283. bx_0 = _mm_sub_epi8(bx_0, off);
  3284. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3285. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3286. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3287. bx_1 = _mm_sub_epi8(bx_1, off);
  3288. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3289. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  3290. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
  3291. // Compute combined scale for the block 2 and 3
  3292. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  3293. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  3294. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3295. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  3296. bx_2 = _mm_sub_epi8(bx_2, off);
  3297. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  3298. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  3299. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  3300. bx_3 = _mm_sub_epi8(bx_3, off);
  3301. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  3302. // Convert int32_t to float
  3303. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  3304. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  3305. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  3306. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  3307. // Apply the scale
  3308. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  3309. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  3310. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  3311. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
  3312. // Acummulate
  3313. acc_0 = _mm_add_ps(p0_d, acc_0);
  3314. acc_1 = _mm_add_ps(p1_d, acc_1);
  3315. acc_2 = _mm_add_ps(p2_d, acc_2);
  3316. acc_3 = _mm_add_ps(p3_d, acc_3);
  3317. }
  3318. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  3319. #elif defined(__riscv_v_intrinsic)
  3320. float sumf = 0.0;
  3321. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3322. for (int i = 0; i < nb; i++) {
  3323. // load elements
  3324. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3325. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3326. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3327. // mask and store lower part of x, and then upper part
  3328. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3329. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3330. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3331. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3332. // subtract offset
  3333. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
  3334. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
  3335. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3336. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3337. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3338. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3339. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3340. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3341. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  3342. }
  3343. *s = sumf;
  3344. #else
  3345. // scalar
  3346. float sumf = 0.0;
  3347. for (int i = 0; i < nb; i++) {
  3348. int sumi = 0;
  3349. for (int j = 0; j < qk/2; ++j) {
  3350. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  3351. const int v1 = (x[i].qs[j] >> 4) - 8;
  3352. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  3353. }
  3354. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  3355. }
  3356. *s = sumf;
  3357. #endif
  3358. }
  3359. void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3360. const int qk = QK8_1;
  3361. const int nb = n / qk;
  3362. assert(n % qk == 0);
  3363. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3364. assert((nrc == 2) || (nrc == 1));
  3365. #else
  3366. assert(nrc == 1);
  3367. #endif
  3368. UNUSED(nrc);
  3369. UNUSED(bx);
  3370. UNUSED(by);
  3371. UNUSED(bs);
  3372. const block_q4_1 * restrict x = vx;
  3373. const block_q8_1 * restrict y = vy;
  3374. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3375. if (nrc == 2) {
  3376. const block_q4_1 * restrict vx0 = vx;
  3377. const block_q4_1 * restrict vx1 = vx + bx;
  3378. const block_q8_1 * restrict vy0 = vy;
  3379. const block_q8_1 * restrict vy1 = vy + by;
  3380. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3381. float32x4_t summs0 = vdupq_n_f32(0.0f);
  3382. for (int i = 0; i < nb; i++) {
  3383. const block_q4_1 * restrict b_x0 = &vx0[i];
  3384. const block_q4_1 * restrict b_x1 = &vx1[i];
  3385. const block_q8_1 * restrict b_y0 = &vy0[i];
  3386. const block_q8_1 * restrict b_y1 = &vy1[i];
  3387. float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s),
  3388. GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s),
  3389. GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s),
  3390. GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)};
  3391. summs0 += summs_t;
  3392. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3393. const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
  3394. const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
  3395. // 4-bit -> 8-bit
  3396. const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3397. const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3398. const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3399. const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3400. // load y
  3401. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  3402. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  3403. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  3404. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  3405. // mmla into int32x4_t
  3406. float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*b_y0->d,
  3407. GGML_FP16_TO_FP32(b_x0->d)*b_y1->d,
  3408. GGML_FP16_TO_FP32(b_x1->d)*b_y0->d,
  3409. GGML_FP16_TO_FP32(b_x1->d)*b_y1->d};
  3410. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3411. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3412. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3413. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3414. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3415. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3416. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3417. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3418. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  3419. l1, r1)), l2, r2)), l3, r3))), scale);
  3420. }
  3421. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  3422. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  3423. sumv2 = sumv2 + summs0;
  3424. vst1_f32(s, vget_low_f32(sumv2));
  3425. vst1_f32(s + bs, vget_high_f32(sumv2));
  3426. return;
  3427. }
  3428. #endif
  3429. // TODO: add WASM SIMD
  3430. #if defined(__ARM_NEON)
  3431. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3432. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3433. float summs = 0;
  3434. assert(nb % 2 == 0); // TODO: handle odd nb
  3435. for (int i = 0; i < nb; i += 2) {
  3436. const block_q4_1 * restrict x0 = &x[i + 0];
  3437. const block_q4_1 * restrict x1 = &x[i + 1];
  3438. const block_q8_1 * restrict y0 = &y[i + 0];
  3439. const block_q8_1 * restrict y1 = &y[i + 1];
  3440. summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s) + GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s);
  3441. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3442. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3443. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3444. // 4-bit -> 8-bit
  3445. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3446. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3447. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3448. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3449. // load y
  3450. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3451. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3452. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3453. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3454. // dot product into int32x4_t
  3455. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
  3456. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
  3457. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3458. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3459. }
  3460. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
  3461. #elif defined(__AVX2__) || defined(__AVX__)
  3462. // Initialize accumulator with zeros
  3463. __m256 acc = _mm256_setzero_ps();
  3464. float summs = 0;
  3465. // Main loop
  3466. for (int i = 0; i < nb; ++i) {
  3467. const float d0 = GGML_FP16_TO_FP32(x[i].d);
  3468. const float d1 = GGML_FP16_TO_FP32(y[i].d);
  3469. summs += GGML_FP16_TO_FP32(x[i].m) * GGML_FP16_TO_FP32(y[i].s);
  3470. const __m256 d0v = _mm256_set1_ps( d0 );
  3471. const __m256 d1v = _mm256_set1_ps( d1 );
  3472. // Compute combined scales
  3473. const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
  3474. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  3475. const __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3476. const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[i].qs );
  3477. const __m256 xy = mul_sum_us8_pairs_float(qx, qy);
  3478. // Accumulate d0*d1*x*y
  3479. #if defined(__AVX2__)
  3480. acc = _mm256_fmadd_ps( d0d1, xy, acc );
  3481. #else
  3482. acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
  3483. #endif
  3484. }
  3485. *s = hsum_float_8(acc) + summs;
  3486. #elif defined(__riscv_v_intrinsic)
  3487. float sumf = 0.0;
  3488. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3489. for (int i = 0; i < nb; i++) {
  3490. // load elements
  3491. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3492. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3493. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3494. // mask and store lower part of x, and then upper part
  3495. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3496. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3497. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3498. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3499. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3500. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3501. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3502. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3503. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3504. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3505. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3506. }
  3507. *s = sumf;
  3508. #else
  3509. // scalar
  3510. float sumf = 0.0;
  3511. for (int i = 0; i < nb; i++) {
  3512. int sumi = 0;
  3513. for (int j = 0; j < qk/2; ++j) {
  3514. const int v0 = (x[i].qs[j] & 0x0F);
  3515. const int v1 = (x[i].qs[j] >> 4);
  3516. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  3517. }
  3518. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3519. }
  3520. *s = sumf;
  3521. #endif
  3522. }
  3523. void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3524. const int qk = QK8_0;
  3525. const int nb = n / qk;
  3526. assert(n % qk == 0);
  3527. assert(qk == QK5_0);
  3528. assert(nrc == 1);
  3529. UNUSED(nrc);
  3530. UNUSED(bx);
  3531. UNUSED(by);
  3532. UNUSED(bs);
  3533. const block_q5_0 * restrict x = vx;
  3534. const block_q8_0 * restrict y = vy;
  3535. #if defined(__ARM_NEON)
  3536. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3537. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3538. uint32_t qh0;
  3539. uint32_t qh1;
  3540. uint64_t tmp0[4];
  3541. uint64_t tmp1[4];
  3542. assert(nb % 2 == 0); // TODO: handle odd nb
  3543. for (int i = 0; i < nb; i += 2) {
  3544. const block_q5_0 * restrict x0 = &x[i];
  3545. const block_q5_0 * restrict x1 = &x[i + 1];
  3546. const block_q8_0 * restrict y0 = &y[i];
  3547. const block_q8_0 * restrict y1 = &y[i + 1];
  3548. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3549. // extract the 5th bit via lookup table ((!b) << 4)
  3550. memcpy(&qh0, x0->qh, sizeof(qh0));
  3551. memcpy(&qh1, x1->qh, sizeof(qh1));
  3552. tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
  3553. tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
  3554. tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
  3555. tmp0[3] = table_b2b_1[(qh0 >> 24) ];
  3556. tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
  3557. tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
  3558. tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
  3559. tmp1[3] = table_b2b_1[(qh1 >> 24) ];
  3560. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  3561. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  3562. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  3563. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  3564. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3565. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3566. // 4-bit -> 8-bit
  3567. int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3568. int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3569. int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3570. int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3571. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  3572. const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
  3573. const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
  3574. const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
  3575. const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
  3576. // load y
  3577. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3578. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3579. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3580. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3581. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3582. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  3583. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3584. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3585. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  3586. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3587. }
  3588. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3589. #elif defined(__wasm_simd128__)
  3590. v128_t sumv = wasm_f32x4_splat(0.0f);
  3591. uint32_t qh;
  3592. uint64_t tmp[4];
  3593. // TODO: check if unrolling this is better
  3594. for (int i = 0; i < nb; ++i) {
  3595. const block_q5_0 * restrict x0 = &x[i];
  3596. const block_q8_0 * restrict y0 = &y[i];
  3597. const v128_t m4b = wasm_i8x16_splat(0x0F);
  3598. // extract the 5th bit
  3599. memcpy(&qh, x0->qh, sizeof(qh));
  3600. tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
  3601. tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
  3602. tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
  3603. tmp[3] = table_b2b_1[(qh >> 24) ];
  3604. const v128_t qhl = wasm_v128_load(tmp + 0);
  3605. const v128_t qhh = wasm_v128_load(tmp + 2);
  3606. const v128_t v0 = wasm_v128_load(x0->qs);
  3607. // 4-bit -> 8-bit
  3608. const v128_t v0l = wasm_v128_and (v0, m4b);
  3609. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  3610. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  3611. const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
  3612. const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
  3613. // load y
  3614. const v128_t v1l = wasm_v128_load(y0->qs);
  3615. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  3616. // int8x16 -> int16x8
  3617. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  3618. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  3619. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  3620. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  3621. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  3622. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  3623. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  3624. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  3625. // dot product
  3626. sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
  3627. wasm_i32x4_add(
  3628. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  3629. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  3630. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  3631. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  3632. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  3633. }
  3634. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  3635. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
  3636. #elif defined(__AVX2__)
  3637. // Initialize accumulator with zeros
  3638. __m256 acc = _mm256_setzero_ps();
  3639. // Main loop
  3640. for (int i = 0; i < nb; i++) {
  3641. /* Compute combined scale for the block */
  3642. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3643. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3644. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3645. bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
  3646. qx = _mm256_or_si256(qx, bxhi);
  3647. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3648. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  3649. /* Multiply q with scale and accumulate */
  3650. acc = _mm256_fmadd_ps(d, q, acc);
  3651. }
  3652. *s = hsum_float_8(acc);
  3653. #elif defined(__AVX__)
  3654. // Initialize accumulator with zeros
  3655. __m256 acc = _mm256_setzero_ps();
  3656. __m128i mask = _mm_set1_epi8((char)0xF0);
  3657. // Main loop
  3658. for (int i = 0; i < nb; i++) {
  3659. /* Compute combined scale for the block */
  3660. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3661. __m256i bx_0 = bytes_from_nibbles_32(x[i].qs);
  3662. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3663. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  3664. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  3665. bxhil = _mm_andnot_si128(bxhil, mask);
  3666. bxhih = _mm_andnot_si128(bxhih, mask);
  3667. __m128i bxl = _mm256_castsi256_si128(bx_0);
  3668. __m128i bxh = _mm256_extractf128_si256(bx_0, 1);
  3669. bxl = _mm_or_si128(bxl, bxhil);
  3670. bxh = _mm_or_si128(bxh, bxhih);
  3671. bx_0 = MM256_SET_M128I(bxh, bxl);
  3672. const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3673. const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0);
  3674. /* Multiply q with scale and accumulate */
  3675. acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
  3676. }
  3677. *s = hsum_float_8(acc);
  3678. #elif defined(__riscv_v_intrinsic)
  3679. float sumf = 0.0;
  3680. uint32_t qh;
  3681. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3682. // These temporary registers are for masking and shift operations
  3683. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  3684. vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
  3685. vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
  3686. vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  3687. for (int i = 0; i < nb; i++) {
  3688. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  3689. // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  3690. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
  3691. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
  3692. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  3693. // ((qh & (1u << (j + 16))) >> (j + 12));
  3694. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
  3695. vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
  3696. // narrowing
  3697. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
  3698. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  3699. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
  3700. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  3701. // load
  3702. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3703. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3704. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3705. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3706. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3707. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  3708. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  3709. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3710. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3711. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
  3712. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
  3713. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3714. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3715. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3716. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3717. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3718. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3719. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  3720. }
  3721. *s = sumf;
  3722. #else
  3723. // scalar
  3724. float sumf = 0.0;
  3725. for (int i = 0; i < nb; i++) {
  3726. uint32_t qh;
  3727. memcpy(&qh, x[i].qh, sizeof(qh));
  3728. int sumi = 0;
  3729. for (int j = 0; j < qk/2; ++j) {
  3730. const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  3731. const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
  3732. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  3733. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  3734. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  3735. }
  3736. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  3737. }
  3738. *s = sumf;
  3739. #endif
  3740. }
  3741. void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3742. const int qk = QK8_1;
  3743. const int nb = n / qk;
  3744. assert(n % qk == 0);
  3745. assert(qk == QK5_1);
  3746. assert(nrc == 1);
  3747. UNUSED(nrc);
  3748. UNUSED(bx);
  3749. UNUSED(by);
  3750. UNUSED(bs);
  3751. const block_q5_1 * restrict x = vx;
  3752. const block_q8_1 * restrict y = vy;
  3753. #if defined(__ARM_NEON)
  3754. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3755. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3756. float summs0 = 0.0f;
  3757. float summs1 = 0.0f;
  3758. uint32_t qh0;
  3759. uint32_t qh1;
  3760. uint64_t tmp0[4];
  3761. uint64_t tmp1[4];
  3762. assert(nb % 2 == 0); // TODO: handle odd nb
  3763. for (int i = 0; i < nb; i += 2) {
  3764. const block_q5_1 * restrict x0 = &x[i];
  3765. const block_q5_1 * restrict x1 = &x[i + 1];
  3766. const block_q8_1 * restrict y0 = &y[i];
  3767. const block_q8_1 * restrict y1 = &y[i + 1];
  3768. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3769. summs0 += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s);
  3770. summs1 += GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s);
  3771. // extract the 5th bit via lookup table ((b) << 4)
  3772. memcpy(&qh0, x0->qh, sizeof(qh0));
  3773. memcpy(&qh1, x1->qh, sizeof(qh1));
  3774. tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
  3775. tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
  3776. tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
  3777. tmp0[3] = table_b2b_0[(qh0 >> 24) ];
  3778. tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
  3779. tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
  3780. tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
  3781. tmp1[3] = table_b2b_0[(qh1 >> 24) ];
  3782. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  3783. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  3784. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  3785. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  3786. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3787. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3788. // 4-bit -> 8-bit
  3789. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3790. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3791. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3792. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3793. // add high bit
  3794. const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
  3795. const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
  3796. const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
  3797. const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
  3798. // load y
  3799. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3800. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3801. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3802. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3803. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3804. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  3805. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3806. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3807. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  3808. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3809. }
  3810. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
  3811. #elif defined(__wasm_simd128__)
  3812. v128_t sumv = wasm_f32x4_splat(0.0f);
  3813. float summs = 0.0f;
  3814. uint32_t qh;
  3815. uint64_t tmp[4];
  3816. // TODO: check if unrolling this is better
  3817. for (int i = 0; i < nb; ++i) {
  3818. const block_q5_1 * restrict x0 = &x[i];
  3819. const block_q8_1 * restrict y0 = &y[i];
  3820. summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s);
  3821. const v128_t m4b = wasm_i8x16_splat(0x0F);
  3822. // extract the 5th bit
  3823. memcpy(&qh, x0->qh, sizeof(qh));
  3824. tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
  3825. tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
  3826. tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
  3827. tmp[3] = table_b2b_0[(qh >> 24) ];
  3828. const v128_t qhl = wasm_v128_load(tmp + 0);
  3829. const v128_t qhh = wasm_v128_load(tmp + 2);
  3830. const v128_t v0 = wasm_v128_load(x0->qs);
  3831. // 4-bit -> 8-bit
  3832. const v128_t v0l = wasm_v128_and (v0, m4b);
  3833. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  3834. // add high bit
  3835. const v128_t v0lf = wasm_v128_or(v0l, qhl);
  3836. const v128_t v0hf = wasm_v128_or(v0h, qhh);
  3837. // load y
  3838. const v128_t v1l = wasm_v128_load(y0->qs);
  3839. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  3840. // int8x16 -> int16x8
  3841. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  3842. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  3843. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  3844. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  3845. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  3846. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  3847. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  3848. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  3849. // dot product
  3850. sumv = wasm_f32x4_add(sumv,
  3851. wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
  3852. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  3853. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  3854. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  3855. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  3856. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  3857. }
  3858. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  3859. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
  3860. #elif defined(__AVX2__)
  3861. // Initialize accumulator with zeros
  3862. __m256 acc = _mm256_setzero_ps();
  3863. float summs = 0.0f;
  3864. // Main loop
  3865. for (int i = 0; i < nb; i++) {
  3866. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  3867. summs += GGML_FP16_TO_FP32(x[i].m) * GGML_FP16_TO_FP32(y[i].s);
  3868. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3869. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3870. bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
  3871. qx = _mm256_or_si256(qx, bxhi);
  3872. const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[i].d));
  3873. const __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3874. const __m256 q = mul_sum_us8_pairs_float(qx, qy);
  3875. acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
  3876. }
  3877. *s = hsum_float_8(acc) + summs;
  3878. #elif defined(__AVX__)
  3879. // Initialize accumulator with zeros
  3880. __m256 acc = _mm256_setzero_ps();
  3881. __m128i mask = _mm_set1_epi8(0x10);
  3882. float summs = 0.0f;
  3883. // Main loop
  3884. for (int i = 0; i < nb; i++) {
  3885. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  3886. summs += GGML_FP16_TO_FP32(x[i].m) * GGML_FP16_TO_FP32(y[i].s);
  3887. __m256i bx_0 = bytes_from_nibbles_32(x[i].qs);
  3888. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3889. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  3890. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  3891. bxhil = _mm_and_si128(bxhil, mask);
  3892. bxhih = _mm_and_si128(bxhih, mask);
  3893. __m128i bxl = _mm256_castsi256_si128(bx_0);
  3894. __m128i bxh = _mm256_extractf128_si256(bx_0, 1);
  3895. bxl = _mm_or_si128(bxl, bxhil);
  3896. bxh = _mm_or_si128(bxh, bxhih);
  3897. bx_0 = MM256_SET_M128I(bxh, bxl);
  3898. const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[i].d));
  3899. const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3900. const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0);
  3901. acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
  3902. }
  3903. *s = hsum_float_8(acc) + summs;
  3904. #elif defined(__riscv_v_intrinsic)
  3905. float sumf = 0.0;
  3906. uint32_t qh;
  3907. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3908. // temporary registers for shift operations
  3909. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  3910. vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  3911. for (int i = 0; i < nb; i++) {
  3912. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  3913. // load qh
  3914. vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
  3915. // ((qh >> (j + 0)) << 4) & 0x10;
  3916. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
  3917. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  3918. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
  3919. // ((qh >> (j + 12)) ) & 0x10;
  3920. vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
  3921. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
  3922. // narrowing
  3923. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
  3924. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  3925. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
  3926. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  3927. // load
  3928. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3929. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3930. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3931. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3932. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3933. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  3934. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  3935. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3936. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3937. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3938. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3939. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3940. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3941. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3942. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3943. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3944. }
  3945. *s = sumf;
  3946. #else
  3947. // scalar
  3948. float sumf = 0.0;
  3949. for (int i = 0; i < nb; i++) {
  3950. uint32_t qh;
  3951. memcpy(&qh, x[i].qh, sizeof(qh));
  3952. int sumi = 0;
  3953. for (int j = 0; j < qk/2; ++j) {
  3954. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  3955. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  3956. const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
  3957. const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
  3958. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  3959. }
  3960. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3961. }
  3962. *s = sumf;
  3963. #endif
  3964. }
  3965. void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3966. const int qk = QK8_0;
  3967. const int nb = n / qk;
  3968. assert(n % qk == 0);
  3969. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3970. assert((nrc == 2) || (nrc == 1));
  3971. #else
  3972. assert(nrc == 1);
  3973. #endif
  3974. UNUSED(nrc);
  3975. UNUSED(bx);
  3976. UNUSED(by);
  3977. UNUSED(bs);
  3978. const block_q8_0 * restrict x = vx;
  3979. const block_q8_0 * restrict y = vy;
  3980. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3981. if (nrc == 2) {
  3982. const block_q8_0 * restrict vx0 = vx;
  3983. const block_q8_0 * restrict vx1 = vx + bx;
  3984. const block_q8_0 * restrict vy0 = vy;
  3985. const block_q8_0 * restrict vy1 = vy + by;
  3986. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3987. for (int i = 0; i < nb; i++) {
  3988. const block_q8_0 * restrict b_x0 = &vx0[i];
  3989. const block_q8_0 * restrict b_y0 = &vy0[i];
  3990. const block_q8_0 * restrict b_x1 = &vx1[i];
  3991. const block_q8_0 * restrict b_y1 = &vy1[i];
  3992. const int8x16_t x0_l = vld1q_s8(b_x0->qs);
  3993. const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
  3994. const int8x16_t x1_l = vld1q_s8(b_x1->qs);
  3995. const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);
  3996. // load y
  3997. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  3998. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  3999. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  4000. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  4001. float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  4002. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  4003. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  4004. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  4005. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4006. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4007. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4008. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4009. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4010. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4011. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  4012. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  4013. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  4014. l1, r1)), l2, r2)), l3, r3))), scale);
  4015. }
  4016. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  4017. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  4018. vst1_f32(s, vget_low_f32(sumv2));
  4019. vst1_f32(s + bs, vget_high_f32(sumv2));
  4020. return;
  4021. }
  4022. #endif
  4023. #if defined(__ARM_NEON)
  4024. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4025. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  4026. assert(nb % 2 == 0); // TODO: handle odd nb
  4027. for (int i = 0; i < nb; i += 2) {
  4028. const block_q8_0 * restrict x0 = &x[i + 0];
  4029. const block_q8_0 * restrict x1 = &x[i + 1];
  4030. const block_q8_0 * restrict y0 = &y[i + 0];
  4031. const block_q8_0 * restrict y1 = &y[i + 1];
  4032. const int8x16_t x0_0 = vld1q_s8(x0->qs);
  4033. const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
  4034. const int8x16_t x1_0 = vld1q_s8(x1->qs);
  4035. const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
  4036. // load y
  4037. const int8x16_t y0_0 = vld1q_s8(y0->qs);
  4038. const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
  4039. const int8x16_t y1_0 = vld1q_s8(y1->qs);
  4040. const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
  4041. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  4042. ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
  4043. ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  4044. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  4045. ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
  4046. ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  4047. }
  4048. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  4049. #elif defined(__AVX2__) || defined(__AVX__)
  4050. // Initialize accumulator with zeros
  4051. __m256 acc = _mm256_setzero_ps();
  4052. // Main loop
  4053. for (int i = 0; i < nb; ++i) {
  4054. // Compute combined scale for the block
  4055. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  4056. __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs);
  4057. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4058. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  4059. // Multiply q with scale and accumulate
  4060. #if defined(__AVX2__)
  4061. acc = _mm256_fmadd_ps( d, q, acc );
  4062. #else
  4063. acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
  4064. #endif
  4065. }
  4066. *s = hsum_float_8(acc);
  4067. #elif defined(__riscv_v_intrinsic)
  4068. float sumf = 0.0;
  4069. size_t vl = __riscv_vsetvl_e8m1(qk);
  4070. for (int i = 0; i < nb; i++) {
  4071. // load elements
  4072. vint8m1_t bx_0 = __riscv_vle8_v_i8m1(x[i].qs, vl);
  4073. vint8m1_t by_0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  4074. vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx_0, by_0, vl);
  4075. vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4076. vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
  4077. int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
  4078. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  4079. }
  4080. *s = sumf;
  4081. #else
  4082. // scalar
  4083. float sumf = 0.0;
  4084. for (int i = 0; i < nb; i++) {
  4085. int sumi = 0;
  4086. for (int j = 0; j < qk; j++) {
  4087. sumi += x[i].qs[j]*y[i].qs[j];
  4088. }
  4089. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  4090. }
  4091. *s = sumf;
  4092. #endif
  4093. }
  4094. #if QK_K == 256
  4095. void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4096. assert(nrc == 1);
  4097. UNUSED(nrc);
  4098. UNUSED(bx);
  4099. UNUSED(by);
  4100. UNUSED(bs);
  4101. const block_q2_K * restrict x = vx;
  4102. const block_q8_K * restrict y = vy;
  4103. const int nb = n / QK_K;
  4104. #ifdef __ARM_NEON
  4105. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4106. const uint8x16_t m4 = vdupq_n_u8(0xF);
  4107. const int32x4_t vzero = vdupq_n_s32(0);
  4108. ggml_int8x16x2_t q2bytes;
  4109. uint8_t aux[16];
  4110. float sum = 0;
  4111. for (int i = 0; i < nb; ++i) {
  4112. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4113. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4114. const uint8_t * restrict q2 = x[i].qs;
  4115. const int8_t * restrict q8 = y[i].qs;
  4116. const uint8_t * restrict sc = x[i].scales;
  4117. const uint8x16_t mins_and_scales = vld1q_u8(sc);
  4118. const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
  4119. vst1q_u8(aux, scales);
  4120. const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
  4121. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  4122. const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
  4123. const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
  4124. vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
  4125. const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
  4126. vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
  4127. sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
  4128. int isum = 0;
  4129. int is = 0;
  4130. // We use this macro instead of a function call because for some reason
  4131. // the code runs 2-3% slower, even if the function is declared inline
  4132. #define MULTIPLY_ACCUM_WITH_SCALE(index)\
  4133. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
  4134. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
  4135. #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
  4136. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
  4137. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
  4138. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
  4139. MULTIPLY_ACCUM_WITH_SCALE((index));
  4140. for (int j = 0; j < QK_K/128; ++j) {
  4141. const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
  4142. ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  4143. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
  4144. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
  4145. MULTIPLY_ACCUM_WITH_SCALE(0);
  4146. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
  4147. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
  4148. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
  4149. is += 8;
  4150. }
  4151. sum += d * isum;
  4152. }
  4153. *s = sum;
  4154. #elif defined __AVX2__
  4155. const __m256i m3 = _mm256_set1_epi8(3);
  4156. const __m128i m4 = _mm_set1_epi8(0xF);
  4157. __m256 acc = _mm256_setzero_ps();
  4158. for (int i = 0; i < nb; ++i) {
  4159. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4160. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4161. const uint8_t * restrict q2 = x[i].qs;
  4162. const int8_t * restrict q8 = y[i].qs;
  4163. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4164. const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
  4165. const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4166. const __m256i mins = _mm256_cvtepi8_epi16(mins8);
  4167. const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
  4168. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
  4169. const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
  4170. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4171. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4172. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
  4173. __m256i sumi = _mm256_setzero_si256();
  4174. for (int j = 0; j < QK_K/128; ++j) {
  4175. const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
  4176. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4177. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4178. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4179. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4180. const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
  4181. const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
  4182. const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
  4183. const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
  4184. __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4185. __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4186. __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
  4187. __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
  4188. p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
  4189. p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
  4190. p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
  4191. p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
  4192. p0 = _mm256_add_epi32(p0, p1);
  4193. p2 = _mm256_add_epi32(p2, p3);
  4194. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
  4195. }
  4196. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4197. }
  4198. *s = hsum_float_8(acc);
  4199. #elif defined __AVX__
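// AVX (pre-AVX2) path: the same computation as the AVX2 branch above, carried out on 128-bit
// halves; the two partial 32-bit sums are only recombined into a __m256i at the end of each
// super-block before being folded into the float accumulator.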
  4200. const __m128i m3 = _mm_set1_epi8(0x3);
  4201. const __m128i m4 = _mm_set1_epi8(0xF);
  4202. const __m128i m2 = _mm_set1_epi8(0x2);
  4203. __m256 acc = _mm256_setzero_ps();
  4204. for (int i = 0; i < nb; ++i) {
  4205. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4206. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4207. const uint8_t * restrict q2 = x[i].qs;
  4208. const int8_t * restrict q8 = y[i].qs;
  4209. // load mins and scales from block_q2_K.scales[QK_K/16]
  4210. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4211. const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
  4212. const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4213. const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
  4214. const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
  4215. // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
  4216. const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
  4217. const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
  4218. // sumf += -dmin * summs in 32bits*8
  4219. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
  4220. const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
  4221. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
  4222. const __m128i scales[2] = { scales_0, scales_1 };
  4223. __m128i sumi_0 = _mm_setzero_si128();
  4224. __m128i sumi_1 = _mm_setzero_si128();
  4225. for (int j = 0; j < QK_K/128; ++j) {
  4226. // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
  4227. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4228. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4229. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4230. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4231. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4232. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4233. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4234. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4235. // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
  4236. __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4237. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4238. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4239. const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4240. const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4241. q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4242. const __m128i q2_1 = _mm_and_si128(q2bits, m3);
  4243. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4244. const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4245. const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4246. // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
  4247. __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
  4248. __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
  4249. __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
  4250. __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
  4251. __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
  4252. __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
  4253. __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
  4254. __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
  4255. // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
  4256. __m128i shuffle = _mm_set1_epi16(0x0100);
  4257. p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
  4258. shuffle = _mm_add_epi16(shuffle, m2);
  4259. p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
  4260. shuffle = _mm_add_epi16(shuffle, m2);
  4261. p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
  4262. shuffle = _mm_add_epi16(shuffle, m2);
  4263. p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
  4264. shuffle = _mm_add_epi16(shuffle, m2);
  4265. p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
  4266. shuffle = _mm_add_epi16(shuffle, m2);
  4267. p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
  4268. shuffle = _mm_add_epi16(shuffle, m2);
  4269. p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
  4270. shuffle = _mm_add_epi16(shuffle, m2);
  4271. p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
  4272. p0 = _mm_add_epi32(p0, p1);
  4273. p2 = _mm_add_epi32(p2, p3);
  4274. p4 = _mm_add_epi32(p4, p5);
  4275. p6 = _mm_add_epi32(p6, p7);
  4276. // isum in 32bits*4*2
  4277. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
  4278. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
  4279. }
  4280. // sumf += dall * isum - dmin * summs in 32bits
  4281. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  4282. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
  4283. }
  4284. *s = hsum_float_8(acc);
  4285. #elif defined __riscv_v_intrinsic
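// RISC-V vector path: the high scale nibbles (mins) are widened, multiplied against y[i].bsums
// and reduced to produce the dmin correction; each 128-quant chunk is then unpacked into four
// 2-bit planes, weighted by vrgather-duplicated sub-block scales, multiplied with q8 and reduced
// into a running integer sum.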
  4286. float sumf = 0;
  4287. uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  4288. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  4289. for (int i = 0; i < nb; ++i) {
  4290. const uint8_t * q2 = x[i].qs;
  4291. const int8_t * q8 = y[i].qs;
  4292. const uint8_t * sc = x[i].scales;
  4293. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4294. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4295. size_t vl = 16;
  4296. vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
  4297. vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
  4298. vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
  4299. vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
  4300. vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
  4301. vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
  4302. vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
  4303. vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  4304. sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
  4305. vl = 32;
  4306. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  4307. vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
  4308. uint8_t is=0;
  4309. int isum=0;
  4310. for (int j = 0; j < QK_K/128; ++j) {
  4311. // load Q2
  4312. vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
  4313. vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
  4314. vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
  4315. vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
  4316. vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
  4317. // duplicate scale elements for product
  4318. vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
  4319. vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
  4320. vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
  4321. vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
  4322. vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
  4323. vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
  4324. vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
  4325. vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
  4326. // load Q8
  4327. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  4328. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  4329. vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
  4330. vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
  4331. vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
  4332. vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
  4333. vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
  4334. vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
  4335. vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
  4336. vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
  4337. isum += __riscv_vmv_x_s_i32m1_i32(isum1);
  4338. q2+=32; q8+=128; is=8;
  4339. }
  4340. sumf += dall * isum;
  4341. }
  4342. *s = sumf;
  4343. #else
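// Portable scalar fallback. Per super-block it computes the same quantity as the SIMD branches,
// illustratively (with the names used below):
//
//     sumf += dall * sum_j (sc[j] & 0xF) * ( sum_l q8[l] * ((q2[l] >> shift_j) & 3) )
//           - dmin * sum_j (sc[j] >> 4) * y[i].bsums[j]
//
// where dall = y[i].d * GGML_FP16_TO_FP32(x[i].d), dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin)
// and j runs over the 16 sub-blocks of 16 quants.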
  4344. float sumf = 0;
  4345. for (int i = 0; i < nb; ++i) {
  4346. const uint8_t * q2 = x[i].qs;
  4347. const int8_t * q8 = y[i].qs;
  4348. const uint8_t * sc = x[i].scales;
  4349. int summs = 0;
  4350. for (int j = 0; j < 16; ++j) {
  4351. summs += y[i].bsums[j] * (sc[j] >> 4);
  4352. }
  4353. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4354. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4355. int isum = 0;
  4356. int is = 0;
  4357. int d;
  4358. for (int k = 0; k < QK_K/128; ++k) {
  4359. int shift = 0;
  4360. for (int j = 0; j < 4; ++j) {
  4361. d = sc[is++] & 0xF;
  4362. int isuml = 0;
  4363. for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
  4364. isum += d * isuml;
  4365. d = sc[is++] & 0xF;
  4366. isuml = 0;
  4367. for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
  4368. isum += d * isuml;
  4369. shift += 2;
  4370. q8 += 32;
  4371. }
  4372. q2 += 32;
  4373. }
  4374. sumf += dall * isum - dmin * summs;
  4375. }
  4376. *s = sumf;
  4377. #endif
  4378. }
  4379. #else
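// Variant of ggml_vec_dot_q2_K_q8_K for the small super-block build (QK_K != 256, i.e. QK_K == 64):
// a block then holds only four 16-quant groups, so the four scale and four min nibbles are read
// with a single 32-bit access of x[i].scales and each block is processed in one pass, without an
// inner chunk loop.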
  4380. void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4381. assert(nrc == 1);
  4382. UNUSED(nrc);
  4383. UNUSED(bx);
  4384. UNUSED(by);
  4385. UNUSED(bs);
  4386. const block_q2_K * restrict x = vx;
  4387. const block_q8_K * restrict y = vy;
  4388. const int nb = n / QK_K;
  4389. #ifdef __ARM_NEON
  4390. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4391. const int32x4_t vzero = vdupq_n_s32(0);
  4392. ggml_int8x16x4_t q2bytes;
  4393. uint32_t aux32[2];
  4394. const uint8_t * scales = (const uint8_t *)aux32;
  4395. float sum = 0;
  4396. for (int i = 0; i < nb; ++i) {
  4397. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4398. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4399. const uint8_t * restrict q2 = x[i].qs;
  4400. const int8_t * restrict q8 = y[i].qs;
  4401. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4402. aux32[0] = sc[0] & 0x0f0f0f0f;
  4403. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  4404. sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  4405. int isum1 = 0, isum2 = 0;
  4406. const uint8x16_t q2bits = vld1q_u8(q2);
  4407. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  4408. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
  4409. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
  4410. q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
  4411. q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
  4412. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
  4413. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
  4414. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
  4415. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
  4416. sum += d * (isum1 + isum2);
  4417. }
  4418. *s = sum;
  4419. #elif defined __AVX2__
  4420. const __m256i m3 = _mm256_set1_epi8(3);
  4421. __m256 acc = _mm256_setzero_ps();
  4422. uint32_t ud, um;
  4423. const uint8_t * restrict db = (const uint8_t *)&ud;
  4424. const uint8_t * restrict mb = (const uint8_t *)&um;
  4425. float summs = 0;
  4426. // TODO: optimize this
  4427. for (int i = 0; i < nb; ++i) {
  4428. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4429. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4430. const uint8_t * restrict q2 = x[i].qs;
  4431. const int8_t * restrict q8 = y[i].qs;
  4432. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4433. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  4434. um = (sc[0] >> 4) & 0x0f0f0f0f;
  4435. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  4436. summs += dmin * smin;
  4437. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  4438. const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
  4439. const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
  4440. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4441. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  4442. const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4443. const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4444. const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
  4445. const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
  4446. const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
  4447. const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
  4448. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
  4449. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
  4450. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
  4451. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
  4452. }
  4453. *s = hsum_float_8(acc) + summs;
  4454. #elif defined __AVX__
  4455. const __m128i m3 = _mm_set1_epi8(3);
  4456. __m256 acc = _mm256_setzero_ps();
  4457. uint32_t ud, um;
  4458. const uint8_t * restrict db = (const uint8_t *)&ud;
  4459. const uint8_t * restrict mb = (const uint8_t *)&um;
  4460. float summs = 0;
  4461. // TODO: optimize this
  4462. for (int i = 0; i < nb; ++i) {
  4463. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4464. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4465. const uint8_t * restrict q2 = x[i].qs;
  4466. const int8_t * restrict q8 = y[i].qs;
  4467. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4468. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  4469. um = (sc[0] >> 4) & 0x0f0f0f0f;
  4470. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  4471. summs += dmin * smin;
  4472. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  4473. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4474. const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4475. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4476. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4477. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4478. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  4479. const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
  4480. const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
  4481. const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
  4482. const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
  4483. const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
  4484. const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
  4485. const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
  4486. const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
  4487. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
  4488. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
  4489. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
  4490. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
  4491. }
  4492. *s = hsum_float_8(acc) + summs;
  4493. #elif defined __riscv_v_intrinsic
  4494. uint32_t aux32[2];
  4495. const uint8_t * scales = (const uint8_t *)aux32;
  4496. float sumf = 0;
  4497. for (int i = 0; i < nb; ++i) {
  4498. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4499. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4500. const uint8_t * restrict q2 = x[i].qs;
  4501. const int8_t * restrict q8 = y[i].qs;
  4502. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4503. aux32[0] = sc[0] & 0x0f0f0f0f;
  4504. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  4505. sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  4506. int isum1 = 0;
  4507. int isum2 = 0;
  4508. size_t vl = 16;
  4509. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  4510. // load Q2
  4511. vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
  4512. vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
  4513. vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
  4514. vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
  4515. vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
  4516. // load Q8, and take product with Q2
  4517. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  4518. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  4519. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  4520. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  4521. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
  4522. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
  4523. vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
  4524. vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
  4525. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
  4526. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
  4527. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
  4528. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
  4529. sumf += d * (isum1 + isum2);
  4530. }
  4531. *s = sumf;
  4532. #else
  4533. float sumf = 0;
  4534. int isum[QK_K/16];
  4535. for (int i = 0; i < nb; ++i) {
  4536. const uint8_t * q2 = x[i].qs;
  4537. const int8_t * q8 = y[i].qs;
  4538. const uint8_t * sc = x[i].scales;
  4539. int summs = 0;
  4540. for (int j = 0; j < QK_K/16; ++j) {
  4541. summs += y[i].bsums[j] * (sc[j] >> 4);
  4542. }
  4543. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4544. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4545. memset(isum, 0, (QK_K/16)*sizeof(int));
  4546. for (int l = 0; l < 16; ++l) {
  4547. isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
  4548. isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
  4549. isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
  4550. isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
  4551. }
  4552. for (int l = 0; l < QK_K/16; ++l) {
  4553. isum[l] *= (sc[l] & 0xF);
  4554. }
  4555. sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
  4556. }
  4557. *s = sumf;
  4558. #endif
  4559. }
  4560. #endif
  4561. #if QK_K == 256
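// Q3_K x Q8_K dot product for QK_K == 256. block_q3_K stores the low 2 bits of each quant in
// qs[QK_K/4], the third (high) bit in hmask[QK_K/8], and sixteen 6-bit sub-block scales packed
// into scales[12]; kmask1/kmask2 are used below to unpack those scales, which are then re-centered
// by subtracting 32. A quant decodes to its low 2 bits minus 4 when its high bit is clear, exactly
// as written out in the scalar branch at the end of this function.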
  4562. void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4563. assert(n % QK_K == 0);
  4564. assert(nrc == 1);
  4565. UNUSED(nrc);
  4566. UNUSED(bx);
  4567. UNUSED(by);
  4568. UNUSED(bs);
  4569. const uint32_t kmask1 = 0x03030303;
  4570. const uint32_t kmask2 = 0x0f0f0f0f;
  4571. const block_q3_K * restrict x = vx;
  4572. const block_q8_K * restrict y = vy;
  4573. const int nb = n / QK_K;
  4574. #ifdef __ARM_NEON
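// NEON path: vbicq_u8(mN, qhbits) selects the lanes whose high bit is clear; shifted into the 4s
// position this becomes the value subtracted from the masked low bits, so q3bytes holds the signed
// quants directly before the dot products with q8. qhbits is shifted down by 4 after the first
// 128-quant chunk to expose the next set of high bits.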
  4575. uint32_t aux[3];
  4576. uint32_t utmp[4];
  4577. const uint8x16_t m3b = vdupq_n_u8(0x3);
  4578. const int32x4_t vzero = vdupq_n_s32(0);
  4579. const uint8x16_t m0 = vdupq_n_u8(1);
  4580. const uint8x16_t m1 = vshlq_n_u8(m0, 1);
  4581. const uint8x16_t m2 = vshlq_n_u8(m0, 2);
  4582. const uint8x16_t m3 = vshlq_n_u8(m0, 3);
  4583. const int8_t m32 = 32;
  4584. ggml_int8x16x4_t q3bytes;
  4585. float sum = 0;
  4586. for (int i = 0; i < nb; ++i) {
  4587. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4588. const uint8_t * restrict q3 = x[i].qs;
  4589. const uint8_t * restrict qh = x[i].hmask;
  4590. const int8_t * restrict q8 = y[i].qs;
  4591. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  4592. ggml_uint8x16x4_t q3h;
  4593. int32_t isum = 0;
  4594. // Set up scales
  4595. memcpy(aux, x[i].scales, 12);
  4596. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  4597. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  4598. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  4599. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  4600. int8_t * scale = (int8_t *)utmp;
  4601. for (int j = 0; j < 16; ++j) scale[j] -= m32;
  4602. for (int j = 0; j < QK_K/128; ++j) {
  4603. const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
  4604. const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
  4605. const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
  4606. q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
  4607. q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
  4608. q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
  4609. q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
  4610. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  4611. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  4612. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  4613. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  4614. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
  4615. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
  4616. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
  4617. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
  4618. scale += 4;
  4619. q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
  4620. q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
  4621. q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
  4622. q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
  4623. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  4624. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  4625. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  4626. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  4627. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
  4628. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
  4629. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
  4630. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
  4631. scale += 4;
  4632. if (j == 0) {
  4633. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
  4634. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
  4635. }
  4636. }
  4637. sum += d * isum;
  4638. }
  4639. *s = sum;
  4640. #elif defined __AVX2__
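// AVX2 path: the packed 6-bit scales are rebuilt with kmask1/kmask2, re-centered by -32 and widened
// to 16 bits; hmask is loaded once per super-block and one bit plane per 32-quant group is turned
// into the 0/4 correction that is subtracted from the low 2-bit plane (see the dot-product comment
// inside the loop).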
  4641. const __m256i m3 = _mm256_set1_epi8(3);
  4642. const __m256i mone = _mm256_set1_epi8(1);
  4643. const __m128i m32 = _mm_set1_epi8(32);
  4644. __m256 acc = _mm256_setzero_ps();
  4645. uint32_t aux[3];
  4646. for (int i = 0; i < nb; ++i) {
  4647. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4648. const uint8_t * restrict q3 = x[i].qs;
  4649. const int8_t * restrict q8 = y[i].qs;
  4650. // Set up scales
  4651. memcpy(aux, x[i].scales, 12);
  4652. __m128i scales128 = _mm_set_epi32(
  4653. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  4654. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  4655. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  4656. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  4657. scales128 = _mm_sub_epi8(scales128, m32);
  4658. const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
  4659. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4660. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4661. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
  4662. // high bit
  4663. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
  4664. // integer accumulator
  4665. __m256i sumi = _mm256_setzero_si256();
  4666. int bit = 0;
  4667. int is = 0;
  4668. for (int j = 0; j < QK_K/128; ++j) {
  4669. // load low 2 bits
  4670. const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
  4671. // prepare low and high bits
  4672. const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
  4673. const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4674. ++bit;
  4675. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
  4676. const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4677. ++bit;
  4678. const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
  4679. const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4680. ++bit;
  4681. const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
  4682. const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4683. ++bit;
  4684. // load Q8 quants
  4685. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4686. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4687. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4688. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
// Dot product: the low 2 bits and the high bit are multiplied with q8 separately so that the
// unsigned*signed _mm256_maddubs_epi16 can be used, and the high-bit product is subtracted afterwards.
// q3h already carries the -4 offset of the Q3_K decode: it is 4 when the high bit is NOT set and 0 when
// it is set, so p16 = maddubs(q3l, q8) - maddubs(q3h, q8) = (q3l - q3h) * q8 gives the signed quants times q8.
  4692. __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  4693. __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  4694. __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
  4695. __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
  4696. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  4697. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  4698. __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
  4699. __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
  4700. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  4701. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  4702. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  4703. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
  4704. // multiply with scales
  4705. p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
  4706. p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
  4707. p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
  4708. p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
  4709. // accumulate
  4710. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  4711. p16_2 = _mm256_add_epi32(p16_2, p16_3);
  4712. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
  4713. }
  4714. // multiply with block scale and accumulate
  4715. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4716. }
  4717. *s = hsum_float_8(acc);
  4718. #elif defined __AVX__
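// AVX (pre-AVX2) path: identical math to the AVX2 branch above split into 128-bit halves; the
// per-group scale is selected by stepping a 16-bit byte-shuffle pattern instead of calling
// get_scale_shuffle_q3k.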
  4719. const __m128i m3 = _mm_set1_epi8(3);
  4720. const __m128i mone = _mm_set1_epi8(1);
  4721. const __m128i m32 = _mm_set1_epi8(32);
  4722. const __m128i m2 = _mm_set1_epi8(2);
  4723. __m256 acc = _mm256_setzero_ps();
  4724. const uint32_t *aux;
  4725. for (int i = 0; i < nb; ++i) {
  4726. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4727. const uint8_t * restrict q3 = x[i].qs;
  4728. const int8_t * restrict q8 = y[i].qs;
  4729. // Set up scales
  4730. aux = (const uint32_t *)x[i].scales;
  4731. __m128i scales128 = _mm_set_epi32(
  4732. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  4733. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  4734. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  4735. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  4736. scales128 = _mm_sub_epi8(scales128, m32);
  4737. const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
  4738. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
  4739. const __m128i scales[2] = { scales_0, scales_1 };
  4740. // high bit *128*2 from block_q3_K.hmask[QK_K/8]
  4741. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
  4742. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
  4743. // integer accumulator
  4744. __m128i sumi_0 = _mm_setzero_si128();
  4745. __m128i sumi_1 = _mm_setzero_si128();
  4746. for (int j = 0; j < QK_K/128; ++j) {
  4747. // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
  4748. const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  4749. const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  4750. // prepare low and high bits
  4751. const int bit = j << 2;
  4752. const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
  4753. const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
  4754. const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
  4755. const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
  4756. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
  4757. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
  4758. const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  4759. const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  4760. const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
  4761. const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
  4762. const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  4763. const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  4764. const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
  4765. const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
  4766. const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  4767. const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  4768. // load Q8 quants from block_q8_K.qs[QK_K]
  4769. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4770. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4771. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4772. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4773. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4774. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4775. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4776. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
// Same low-bit / high-bit split as in the AVX2 branch above, but with 128-bit _mm_maddubs_epi16:
// q3h is 4 when the high bit is NOT set and 0 when it is set, so subtracting the q8s products
// from the p16 products leaves (q3l - q3h) * q8, i.e. the signed quants times q8.
  4780. __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
  4781. __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
  4782. __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
  4783. __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
  4784. __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
  4785. __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
  4786. __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
  4787. __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
  4788. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
  4789. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
  4790. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
  4791. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
  4792. __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
  4793. __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
  4794. __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
  4795. __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
  4796. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  4797. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  4798. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  4799. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  4800. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  4801. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  4802. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  4803. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  4804. // multiply with scales
  4805. __m128i shuffle = _mm_set1_epi16(0x0100);
  4806. p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
  4807. shuffle = _mm_add_epi16(shuffle, m2);
  4808. p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
  4809. shuffle = _mm_add_epi16(shuffle, m2);
  4810. p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
  4811. shuffle = _mm_add_epi16(shuffle, m2);
  4812. p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
  4813. shuffle = _mm_add_epi16(shuffle, m2);
  4814. p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
  4815. shuffle = _mm_add_epi16(shuffle, m2);
  4816. p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
  4817. shuffle = _mm_add_epi16(shuffle, m2);
  4818. p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
  4819. shuffle = _mm_add_epi16(shuffle, m2);
  4820. p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
  4821. // accumulate
  4822. p16_0 = _mm_add_epi32(p16_0, p16_1);
  4823. p16_2 = _mm_add_epi32(p16_2, p16_3);
  4824. p16_4 = _mm_add_epi32(p16_4, p16_5);
  4825. p16_6 = _mm_add_epi32(p16_6, p16_7);
  4826. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  4827. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
  4828. }
  4829. // multiply with block scale and accumulate
  4830. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  4831. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  4832. }
  4833. *s = hsum_float_8(acc);
  4834. #elif defined __riscv_v_intrinsic
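// RISC-V vector path: the 0/4 high-bit correction is applied with masked subtracts
// (__riscv_vsub_vx_i8m1_m on lanes whose hmask bit is clear), after which each 32-quant group is
// widened, multiplied with q8, weighted by its 6-bit scale and reduced into sum_t.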
  4835. uint32_t aux[3];
  4836. uint32_t utmp[4];
  4837. float sumf = 0;
  4838. for (int i = 0; i < nb; ++i) {
  4839. const uint8_t * restrict q3 = x[i].qs;
  4840. const uint8_t * restrict qh = x[i].hmask;
  4841. const int8_t * restrict q8 = y[i].qs;
  4842. memcpy(aux, x[i].scales, 12);
  4843. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  4844. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  4845. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  4846. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  4847. int8_t * scale = (int8_t *)utmp;
  4848. for (int j = 0; j < 16; ++j) scale[j] -= 32;
  4849. size_t vl = 32;
  4850. uint8_t m = 1;
  4851. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  4852. vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
  4853. int sum_t = 0;
  4854. for (int j = 0; j < QK_K; j += 128) {
  4855. vl = 32;
  4856. // load Q3
  4857. vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
  4858. vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
  4859. vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
  4860. vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
  4861. vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
  4862. // compute mask for subtraction
  4863. vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4864. vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
  4865. vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
  4866. m <<= 1;
  4867. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4868. vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
  4869. vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
  4870. m <<= 1;
  4871. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4872. vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
  4873. vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
  4874. m <<= 1;
  4875. vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
  4876. vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
  4877. vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
  4878. m <<= 1;
  4879. // load Q8 and take product with Q3
  4880. vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
  4881. vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  4882. vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  4883. vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  4884. vl = 16;
  4885. // retrieve lane to multiply with scale
  4886. vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
  4887. vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
  4888. vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
  4889. vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
  4890. vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
  4891. vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
  4892. vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
  4893. vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
  4894. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
  4895. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
  4896. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
  4897. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
  4898. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
  4899. q3 += 32; q8 += 128; scale += 8;
  4900. }
  4901. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  4902. sumf += d*sum_t;
  4903. }
  4904. *s = sumf;
  4905. #else
  4906. // scalar version
  4907. // This function is written like this so the compiler can manage to vectorize most of it
  4908. // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
  4909. // manually vectorized version above. Every other version I tried would run at least 4 times slower.
  4910. // The ideal situation would be if we could just write the code once, and the compiler would
  4911. // automatically produce the best possible set of machine instructions, instead of us having to manually
  4912. // write vectorized versions for AVX, ARM_NEON, etc.
  4913. int8_t aux8[QK_K];
  4914. int16_t aux16[8];
  4915. float sums [8];
  4916. int32_t aux32[8];
  4917. memset(sums, 0, 8*sizeof(float));
  4918. uint32_t auxs[4];
  4919. const int8_t * scales = (const int8_t*)auxs;
  4920. float sumf = 0;
  4921. for (int i = 0; i < nb; ++i) {
  4922. const uint8_t * restrict q3 = x[i].qs;
  4923. const uint8_t * restrict hm = x[i].hmask;
  4924. const int8_t * restrict q8 = y[i].qs;
  4925. memset(aux32, 0, 8*sizeof(int32_t));
  4926. int8_t * restrict a = aux8;
  4927. uint8_t m = 1;
  4928. for (int j = 0; j < QK_K; j += 128) {
  4929. for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
  4930. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4931. a += 32; m <<= 1;
  4932. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
  4933. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4934. a += 32; m <<= 1;
  4935. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
  4936. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4937. a += 32; m <<= 1;
  4938. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
  4939. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  4940. a += 32; m <<= 1;
  4941. q3 += 32;
  4942. }
  4943. a = aux8;
  4944. memcpy(auxs, x[i].scales, 12);
  4945. uint32_t tmp = auxs[2];
  4946. auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  4947. auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  4948. auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  4949. auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  4950. for (int j = 0; j < QK_K/16; ++j) {
  4951. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  4952. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  4953. q8 += 8; a += 8;
  4954. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  4955. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  4956. q8 += 8; a += 8;
  4957. }
  4958. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  4959. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  4960. }
  4961. for (int l = 0; l < 8; ++l) sumf += sums[l];
  4962. *s = sumf;
  4963. #endif
  4964. }
  4965. #else
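// Variant of ggml_vec_dot_q3_K_q8_K for the small super-block build (QK_K == 64): the block carries
// four 4-bit scales (re-centered by -8) in two bytes of x[i].scales and an 8-byte hmask, so every
// implementation below handles a whole block in a single pass.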
  4966. void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4967. assert(n % QK_K == 0);
  4968. assert(nrc == 1);
  4969. UNUSED(nrc);
  4970. UNUSED(bx);
  4971. UNUSED(by);
  4972. UNUSED(bs);
  4973. const block_q3_K * restrict x = vx;
  4974. const block_q8_K * restrict y = vy;
  4975. const int nb = n / QK_K;
  4976. #ifdef __ARM_NEON
  4977. const int32x4_t vzero = vdupq_n_s32(0);
  4978. const uint8x16_t m3b = vdupq_n_u8(0x3);
  4979. const uint8x16_t mh = vdupq_n_u8(4);
  4980. ggml_int8x16x4_t q3bytes;
  4981. uint16_t aux16[2];
  4982. int8_t * scales = (int8_t *)aux16;
  4983. float sum = 0;
  4984. for (int i = 0; i < nb; ++i) {
  4985. ggml_uint8x16x4_t q3h;
  4986. const uint8x8_t hbits = vld1_u8(x[i].hmask);
  4987. const uint8x16_t q3bits = vld1q_u8(x[i].qs);
  4988. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);
  4989. const uint16_t a = *(const uint16_t *)x[i].scales;
  4990. aux16[0] = a & 0x0f0f;
  4991. aux16[1] = (a >> 4) & 0x0f0f;
  4992. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  4993. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  4994. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4995. const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
  4996. q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
  4997. q3h.val[1] = vandq_u8(mh, htmp);
  4998. q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
  4999. q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
  5000. q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
  5001. q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
  5002. q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
  5003. q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
  5004. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
  5005. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
  5006. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
  5007. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
  5008. sum += d * isum;
  5009. }
  5010. *s = sum;
  5011. #elif defined __AVX2__
  5012. const __m256i m3 = _mm256_set1_epi8(3);
  5013. const __m256i m1 = _mm256_set1_epi8(1);
  5014. __m256 acc = _mm256_setzero_ps();
  5015. uint64_t aux64;
  5016. uint16_t aux16[2];
  5017. const int8_t * aux8 = (const int8_t *)aux16;
  5018. for (int i = 0; i < nb; ++i) {
  5019. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5020. const uint8_t * restrict q3 = x[i].qs;
  5021. const int8_t * restrict q8 = y[i].qs;
  5022. const uint16_t a = *(const uint16_t *)x[i].scales;
  5023. aux16[0] = a & 0x0f0f;
  5024. aux16[1] = (a >> 4) & 0x0f0f;
  5025. const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
  5026. const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
  5027. memcpy(&aux64, x[i].hmask, 8);
  5028. const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  5029. __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
  5030. __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
  5031. q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
  5032. q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
  5033. // load low 2 bits
  5034. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  5035. // prepare low and high bits
  5036. const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
  5037. const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
  5038. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
  5039. // load Q8 quants
  5040. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5041. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
// Dot product: low 2 bits and high bit are multiplied with q8 separately (so _mm256_maddubs_epi16 can
// be used on unsigned inputs) and the high-bit product is subtracted. q3h is 4 when the high bit is
// NOT set and 0 when it is set, which folds in the -4 offset of the Q3_K decode.
  5045. const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  5046. const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  5047. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  5048. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  5049. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  5050. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  5051. // multiply with scales
  5052. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  5053. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  5054. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  5055. // multiply with block scale and accumulate
  5056. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
  5057. }
  5058. *s = hsum_float_8(acc);
  5059. #elif defined __AVX__
  5060. const __m128i m3 = _mm_set1_epi8(3);
  5061. const __m128i m1 = _mm_set1_epi8(1);
  5062. __m256 acc = _mm256_setzero_ps();
  5063. uint64_t aux64;
  5064. uint16_t aux16[2];
  5065. const int8_t * aux8 = (const int8_t *)aux16;
  5066. for (int i = 0; i < nb; ++i) {
  5067. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5068. const uint8_t * restrict q3 = x[i].qs;
  5069. const int8_t * restrict q8 = y[i].qs;
  5070. const uint16_t a = *(const uint16_t *)x[i].scales;
  5071. aux16[0] = a & 0x0f0f;
  5072. aux16[1] = (a >> 4) & 0x0f0f;
  5073. const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
  5074. const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
  5075. const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
  5076. const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
  5077. memcpy(&aux64, x[i].hmask, 8);
  5078. __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  5079. __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
  5080. __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
  5081. __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
  5082. q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
  5083. q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
  5084. q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
  5085. q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
  5086. // load low 2 bits
  5087. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  5088. // prepare low and high bits
  5089. const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
  5090. const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
  5091. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
  5092. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
  5093. // load Q8 quants
  5094. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5095. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
// Same split with 128-bit _mm_maddubs_epi16: q3h is 4 when the high bit is NOT set and 0 when it is
// set, so subtracting the q8s products from the p16 products yields the signed quants times q8.
  5099. const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
  5100. const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
  5101. const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
  5102. const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
  5103. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
  5104. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
  5105. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
  5106. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
  5107. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  5108. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  5109. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  5110. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  5111. // multiply with scales
  5112. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  5113. p16_1 = _mm_madd_epi16(scale_1, p16_1);
  5114. p16_2 = _mm_madd_epi16(scale_2, p16_2);
  5115. p16_3 = _mm_madd_epi16(scale_3, p16_3);
  5116. p16_0 = _mm_add_epi32(p16_0, p16_2);
  5117. p16_1 = _mm_add_epi32(p16_1, p16_3);
  5118. __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
  5119. // multiply with block scale and accumulate
  5120. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
  5121. }
  5122. *s = hsum_float_8(acc);
  5123. #elif defined __riscv_v_intrinsic
  5124. uint16_t aux16[2];
  5125. int8_t * scales = (int8_t *)aux16;
  5126. float sumf = 0;
  5127. for (int i = 0; i < nb; ++i) {
  5128. const uint8_t * restrict q3 = x[i].qs;
  5129. const int8_t * restrict q8 = y[i].qs;
  5130. const uint16_t a = *(const uint16_t *)x[i].scales;
  5131. aux16[0] = a & 0x0f0f;
  5132. aux16[1] = (a >> 4) & 0x0f0f;
  5133. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  5134. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  5135. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5136. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5137. // load qh
  5138. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
  5139. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  5140. size_t vl = 16;
  5141. // extend and combine both qh_x1 and qh_x2
  5142. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
  5143. vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5144. vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
  5145. vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5146. vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
  5147. // load Q3
  5148. vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
  5149. vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
  5150. vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
  5151. vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
  5152. vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
  5153. vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
  5154. vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
  5155. vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
  5156. vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
  5157. // load Q8 and take product with Q3
  5158. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  5159. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  5160. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  5161. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  5162. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  5163. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  5164. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  5165. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  5166. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
  5167. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
  5168. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
  5169. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
  5170. sumf += d * isum;
  5171. }
  5172. *s = sumf;
  5173. #else
    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    int32_t scales[4];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q3 = x[i].qs;
        const uint8_t * restrict hm = x[i].hmask;
        const  int8_t * restrict q8 = y[i].qs;
        int8_t * restrict a = aux8;
        for (int l = 0; l < 8; ++l) {
            a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
            a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
            a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
            a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
            a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
            a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
            a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
            a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
        }

        scales[0] = (x[i].scales[0] & 0xF) - 8;
        scales[1] = (x[i].scales[0] >> 4) - 8;
        scales[2] = (x[i].scales[1] & 0xF) - 8;
        scales[3] = (x[i].scales[1] >> 4) - 8;

        memset(aux32, 0, 8*sizeof(int32_t));
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 8; ++l) aux16[l]  = q8[l] * a[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;

#endif

}
#endif

#if QK_K == 256
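// q4_K · q8_K dot product for the QK_K == 256 super-block layout: each block
// carries fp16 d and dmin plus 12 scale bytes packing eight 6-bit sub-block
// scales and eight 6-bit mins, unpacked below with kmask1/kmask2/kmask3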
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];

#ifdef __ARM_NEON
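    // NEON path: the sum(mins * bsums) correction is computed up front with
    // widening multiplies, then each 64-quant chunk contributes two
    // ggml_vdotq_s32 dot products (low nibbles, then high nibbles)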
    const uint8x16_t m4b = vdupq_n_u8(0xf);
    const int32x4_t mzero = vdupq_n_s32(0);

    ggml_int8x16x2_t q4bytes;
    ggml_int8x16x2_t q8bytes;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));

        memcpy(utmp, x[i].scales, 12);

        uint32x2_t mins8 = { 0 };
        mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
        mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);

        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[0] &= kmask1;

        const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
        const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
                                         vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
        sumf -= dmin * vaddvq_s32(prod);

        const uint8_t * scales = (const uint8_t *)utmp;

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        int32_t sumi1 = 0;
        int32_t sumi2 = 0;

        for (int j = 0; j < QK_K/64; ++j) {
            const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;

            q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
            q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
            q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));

            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
            sumi1 += vaddvq_s32(p1) * scales[2*j+0];

            q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
            q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
            q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));

            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
            sumi2 += vaddvq_s32(p2) * scales[2*j+1];
        }

        sumf += d * (sumi1 + sumi2);
    }

    *s = sumf;

#elif defined __AVX2__
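    // AVX2 path: scales and mins are widened to 16 bit in one register; the
    // q4 nibbles are <= 15, so the _mm256_maddubs_epi16 pair sums cannot
    // saturate int16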
    const __m256i m4 = _mm256_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();
    __m128 acc_m = _mm_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));

        const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
        const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
        const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
        acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);

        const __m128i sc128  = _mm256_extracti128_si256(mins_and_scales, 0);
        const __m256i scales = MM256_SET_M128I(sc128, sc128);

        __m256i sumi = _mm256_setzero_si256();

        for (int j = 0; j < QK_K/64; ++j) {

            const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
            const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));

            const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
            const __m256i q4l = _mm256_and_si256(q4bits, m4);
            const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);

            const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
            p16l = _mm256_madd_epi16(scale_l, p16l);

            const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
            p16h = _mm256_madd_epi16(scale_h, p16h);
            const __m256i sumj = _mm256_add_epi32(p16l, p16h);

            sumi = _mm256_add_epi32(sumi, sumj);
        }

        __m256 vd = _mm256_set1_ps(d);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
    }

    acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
    acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));

    *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#elif defined __AVX__
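    // AVX path: same algorithm as AVX2, with the 256-bit integer work split
    // into two 128-bit halves that are merged into the float accumulator at
    // the end of each block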
    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(0x2);

    __m256 acc = _mm256_setzero_ps();
    __m128 acc_m = _mm_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
        const __m128i scales = _mm_cvtepu8_epi16(utmps);
        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));

        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
        const __m128i prod = _mm_madd_epi16(mins, q8s);
        acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        __m128i shuffle = _mm_set1_epi16(0x0100);
        for (int j = 0; j < QK_K/64; ++j) {

            const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);
            const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);

            __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
            const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
            q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
            const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);

            const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
            p16l = _mm_madd_epi16(scale_l, p16l);
            sumi_0 = _mm_add_epi32(sumi_0, p16l);
            const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
            p16l = _mm_madd_epi16(scale_l, p16l);
            sumi_1 = _mm_add_epi32(sumi_1, p16l);

            const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
            p16h = _mm_madd_epi16(scale_h, p16h);
            sumi_0 = _mm_add_epi32(sumi_0, p16h);
            const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
            p16h = _mm_madd_epi16(scale_h, p16h);
            sumi_1 = _mm_add_epi32(sumi_1, p16h);
        }

        __m256 vd = _mm256_set1_ps(d);
        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
    }

    acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
    acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));

    *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#elif defined __riscv_v_intrinsic
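    // RISC-V vector path: strided loads pair up the bsums, the mins are
    // zero-extended and reduced for the dmin correction, then each 64-quant
    // chunk is two widening multiply + reduce passes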
    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        size_t vl = 8;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
        vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
        vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
        vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
        vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);

        vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
        sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        vl = 32;

        int32_t sum_1 = 0;
        int32_t sum_2 = 0;

        vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

        for (int j = 0; j < QK_K/64; ++j) {
            // load Q4
            vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);

            // load Q8 and multiply it with lower Q4 nibble
            vint8m1_t  q8_0 = __riscv_vle8_v_i8m1(q8, vl);
            vint8m1_t  q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
            vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
            vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);

            sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];

            // load Q8 and multiply it with upper Q4 nibble
            vint8m1_t  q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
            vint8m1_t  q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
            vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
            vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);

            sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];

            q4 += 32; q8 += 64;
        }

        sumf += d*(sum_1 + sum_2);
    }

    *s = sumf;

#else
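    // reference scalar implementation: expand all nibbles into aux8, take
    // the bsums*mins correction, then accumulate scale-weighted q4*q8
    // products in 8-wide int32 lanes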
    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int j = 0; j < QK_K/64; ++j) {
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
            a += 32;
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
            a += 32; q4 += 32;
        }
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        int sumi = 0;
        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            int32_t scale = scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#else
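// q4_K · q8_K for the small QK_K == 64 blocks: x[i].d holds two fp16 values
// (scale in d[0], min in d[1]) and the two scale bytes pack two 4-bit scales
// in the low nibbles and two 4-bit mins in the high nibbles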
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);

    const int32x4_t mzero = vdupq_n_s32(0);

    float sumf = 0;

    ggml_int8x16x2_t q4bytes;
    ggml_int8x16x4_t q8bytes;

    float sum_mins = 0.f;

    uint16_t aux16[2];
    const uint8_t * restrict scales = (const uint8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;

        const uint16_t * restrict a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;

        const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
        sum_mins += y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * summi;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);

        const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);

        q8bytes = ggml_vld1q_s8_x4(q8);
        q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
        q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));

        const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
        const int32_t sumi1 = vaddvq_s32(p1) * scales[0];

        q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
        q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));

        const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
        const int32_t sumi2 = vaddvq_s32(p2) * scales[1];

        sumf += d * (sumi1 + sumi2);
    }

    *s = sumf - sum_mins;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    uint16_t aux16[2];
    const uint8_t * scales = (const uint8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
        const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
        const __m256 vd = _mm256_set1_ps(d);

        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;

        summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
        const __m256i q4l = _mm256_and_si256(q4bits, m4);
        const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);

        const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
        const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);

        const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);

        const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
    }

    *s = hsum_float_8(acc) - summs;

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    uint16_t aux16[2];
    const uint8_t * scales = (const uint8_t *)aux16;

    for (int i = 0; i < nb; ++i) {

        const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
        const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
        const __m256 vd = _mm256_set1_ps(d);

        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;

        summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
        const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
        const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
        const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
        const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
        const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
        const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
        const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
        const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
        const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));

        const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
        const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);

        const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
        const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
    }

    *s = hsum_float_8(acc) - summs;

#elif defined __riscv_v_intrinsic

    uint16_t s16[2];
    const uint8_t * restrict scales = (const uint8_t *)s16;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;

        const uint16_t * restrict b = (const uint16_t *)x[i].scales;
        s16[0] = b[0] & 0x0f0f;
        s16[1] = (b[0] >> 4) & 0x0f0f;

        sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);

        size_t vl = 32;

        vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);

        // load Q4
        vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);

        // load Q8 and multiply it with lower Q4 nibble
        vint8m1_t  q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
        vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
        vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);

        sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);

        // load Q8 and multiply it with upper Q4 nibble
        vint8m1_t  q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
        vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
        vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);

        sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
    }

    *s = sumf;

#else

    uint8_t aux8[QK_K];
    int16_t aux16[16];
    float   sums [8];
    memset(sums, 0, 8*sizeof(float));

    uint16_t s16[2];
    const uint8_t * restrict scales = (const uint8_t *)s16;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const  int8_t * restrict q8 = y[i].qs;
        uint8_t * restrict a = aux8;
        for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
        for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;

        const uint16_t * restrict b = (const uint16_t *)x[i].scales;
        s16[0] = b[0] & 0x0f0f;
        s16[1] = (b[0] >> 4) & 0x0f0f;

        sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);

        for (int j = 0; j < QK_K/32; ++j) {
            for (int l = 0; l < 16; ++l) aux16[l]  = q8[l] * a[l];
            q8 += 16; a += 16;
            for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
            q8 += 16; a += 16;
            const float dl = d * scales[j];
            for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
        }
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
#endif

#if QK_K == 256
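// q5_K · q8_K for QK_K == 256: same packed 6-bit scale/min layout as q4_K,
// plus 32 bytes of qh supplying the 5th bit of each of the 256 quants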
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);
    const uint8x16_t mone = vdupq_n_u8(1);
    const uint8x16_t mtwo = vdupq_n_u8(2);
    const int32x4_t mzero = vdupq_n_s32(0);

    ggml_int8x16x4_t q5bytes;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
        const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
        const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
                                         vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
        int32_t sumi_mins = vaddvq_s32(prod);

        const uint8_t * scales = (const uint8_t *)utmp;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);

        ggml_uint8x16x4_t q5h;

        int32_t sumi = 0;

        for (int j = 0; j < QK_K/64; ++j) {

            const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
            const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;

            q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
            q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
            q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
            q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
            qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
            qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);

            q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
            q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
            q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
            q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));

            sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
            sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
        }

        sumf += d * sumi - dmin * sumi_mins;
    }

    *s = sumf;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m128i mzero = _mm_setzero_si128();
    const __m256i mone  = _mm256_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0.f;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

#if QK_K == 256
        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;
#else
        // TODO
        const float d = 0, dmin = 0;
#endif

        const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));

        const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
        const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
        const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
        summs += dmin * _mm_extract_epi32(hsum, 0);

        const __m128i sc128  = _mm256_extracti128_si256(mins_and_scales, 0);
        const __m256i scales = MM256_SET_M128I(sc128, sc128);

        const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
        __m256i hmask = mone;

        __m256i sumi = _mm256_setzero_si256();

        int bit = 0;

        for (int j = 0; j < QK_K/64; ++j) {

            const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
            const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));

            const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;

            const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
            const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
            const __m256i q5_0  = _mm256_add_epi8(q5l_0, q5h_0);
            hmask = _mm256_slli_epi16(hmask, 1);

            const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
            const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
            const __m256i q5_1  = _mm256_add_epi8(q5l_1, q5h_1);
            hmask = _mm256_slli_epi16(hmask, 1);

            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;

            __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
            __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);

            p16_0 = _mm256_madd_epi16(scale_0, p16_0);
            p16_1 = _mm256_madd_epi16(scale_1, p16_1);

            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
        }

        __m256 vd = _mm256_set1_ps(d);
        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc) + summs;

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i mzero = _mm_setzero_si128();
    const __m128i mone  = _mm_set1_epi8(1);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0.f;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
        const __m128i scales = _mm_cvtepu8_epi16(utmps);
        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));

        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
        const __m128i prod = _mm_madd_epi16(mins, q8s);
        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
        summs += dmin * _mm_extract_epi32(hsum, 0);

        const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
        const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
        __m128i hmask = mone;

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        int bit = 0;

        __m128i shuffle = _mm_set1_epi16(0x0100);
        for (int j = 0; j < QK_K/64; ++j) {

            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);
            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);

            const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
            const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;

            __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
            __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
            __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
            __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
            __m128i q5_0  = _mm_add_epi8(q5l_0, q5h_0);
            __m128i q5_1  = _mm_add_epi8(q5l_1, q5h_1);
            hmask = _mm_slli_epi16(hmask, 1);

            __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
            p16_0 = _mm_madd_epi16(scale_0, p16_0);
            p16_1 = _mm_madd_epi16(scale_0, p16_1);

            q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
            q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
            q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
            q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
            q5_0  = _mm_add_epi8(q5l_0, q5h_0);
            q5_1  = _mm_add_epi8(q5l_1, q5h_1);
            hmask = _mm_slli_epi16(hmask, 1);

            q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
            __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
            p16_2 = _mm_madd_epi16(scale_1, p16_2);
            p16_3 = _mm_madd_epi16(scale_1, p16_3);

            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
        }

        __m256 vd = _mm256_set1_ps(d);
        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc) + summs;

#elif defined __riscv_v_intrinsic
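    // RISC-V vector path: the qh bits are turned into per-lane masks and the
    // +16 high-bit contribution is applied with masked adds before the
    // widening multiply against q8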
    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    float sumf = 0;
    float sums = 0.0;

    size_t vl;

    for (int i = 0; i < nb; ++i) {

        vl = 8;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict hm = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;

        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;

        vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
        vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
        vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
        vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
        vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);

        vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
        sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);

        vl = 32;
        int32_t aux32 = 0;
        int is = 0;

        uint8_t m = 1;
        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
        vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);

        for (int j = 0; j < QK_K/64; ++j) {
            // load Q5 and Q8
            vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
            vint8m1_t  q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
            vint8m1_t  q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);

            // compute mask for addition
            vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
            vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
            vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
            m <<= 1;

            vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
            vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
            vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
            vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
            m <<= 1;

            vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
            vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);

            vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
            vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);

            vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
            vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);

            aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
            q5 += 32; q8 += 64;
        }

        vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
        sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
    }

    *s = sumf+sums;

#else
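    // reference scalar implementation: unpack the 4-bit quants into aux8,
    // add 16 where the corresponding qh bit is set, then do the usual
    // scale-weighted accumulation minus the dmin*bsums correction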
    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const uint8_t * restrict hm = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        uint8_t m = 1;
        for (int j = 0; j < QK_K/64; ++j) {
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
            a += 32; m <<= 1;
            q4 += 32;
        }
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        int sumi = 0;
        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            int32_t scale = scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#else
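// q5_K · q8_K for the small QK_K == 64 blocks: one fp16 super-block scale,
// four signed int8 sub-block scales, and 8 bytes of qh; a quant whose high
// bit is clear is offset by -16 (via vbic/andnot of the expanded qh bits)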
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    const uint8x16_t m4b = vdupq_n_u8(0xf);
    const uint8x16_t mh = vdupq_n_u8(16);
    const int32x4_t mzero = vdupq_n_s32(0);

    ggml_int8x16x4_t q5bytes;
    ggml_uint8x16x4_t q5h;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const int8_t * sc = x[i].scales;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const uint8x8_t qhbits = vld1_u8(qh);

        const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
        const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);

        const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
        q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
        q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
        q5h.val[2] = vbicq_u8(mh, htmp);
        q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));

        q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
        q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
        q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
        q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));

        int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
        int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
        int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
        int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));

        sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
    }

    *s = sumf;

#elif defined __AVX2__

    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m256i mone = _mm256_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);

        const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
        const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));

        int64_t aux64;
        memcpy(&aux64, x[i].qh, 8);
        const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
        const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);

        const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
        const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);

        const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
        const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
        const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
        const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
        const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));

        const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));

        acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i mone = _mm_set1_epi8(1);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t  * restrict q8 = y[i].qs;

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);

        const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
        const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
        const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
        const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);

        int64_t aux64;
        memcpy(&aux64, x[i].qh, 8);
        const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
        const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);

        const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
        const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
        const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
        const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);

        const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
        const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
        const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
        const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
        const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
        const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
        const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
        const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
        const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
        const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
        const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));

        const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
        const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));

        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const int8_t * sc = x[i].scales;

        const uint8_t * restrict q5 = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        // load qh
        vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
        vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));

        size_t vl = 16;

        // extend and combine both qh_x1 and qh_x2
        vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);

        vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
        vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
        vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
        vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);

        vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
        vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
        vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
        vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);

        // load q5
        vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
        vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);

        vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
        vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
        vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
        vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));

        vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
        vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
        vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
        vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);

        // load Q8 and multiply it with Q5
        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

        vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
        vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
        vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
        vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

        int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
        int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
        int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
        int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);

        sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
    }

    *s = sumf;

#else

    int8_t  aux8[QK_K];
    int16_t aux16[16];
    float   sums [8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].qs;
        const uint8_t * restrict hm = x[i].qh;
        const  int8_t * restrict q8 = y[i].qs;
        int8_t * restrict a = aux8;
        for (int l = 0; l < 32; ++l) {
            a[l+ 0] = q4[l] & 0xF;
            a[l+32] = q4[l] >> 4;
        }
        for (int is = 0; is < 8; ++is) {
            uint8_t m = 1 << is;
            for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
        }

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
        const int8_t * restrict sc = x[i].scales;

        for (int j = 0; j < QK_K/16; ++j) {
            const float dl = d * sc[j];
            for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l <  8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
            q8 += 16; a += 16;
        }
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
#endif

#if QK_K == 256
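// q6_K · q8_K for QK_K == 256: 4-bit low quants in ql, 2-bit high quants in
// qh, and 16 signed int8 sub-block scales; the 6-bit quants are stored
// unsigned with an implicit -32 offset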
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q6_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
    float sum = 0;

    const uint8x16_t m4b = vdupq_n_u8(0xF);
    const int32x4_t  vzero = vdupq_n_s32(0);
    //const int8x16_t  m32s = vdupq_n_s8(32);
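    // note: m32s is unused because the implicit -32 offset of the q6 quants
    // is not subtracted per element; it is folded into isum_mins below as
    // sum(scale[j] * bsums[j]) and removed once per super-block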
  6177. const uint8x16_t mone = vdupq_n_u8(3);
  6178. ggml_int8x16x4_t q6bytes;
  6179. ggml_uint8x16x4_t q6h;
  6180. for (int i = 0; i < nb; ++i) {
  6181. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  6182. const uint8_t * restrict q6 = x[i].ql;
  6183. const uint8_t * restrict qh = x[i].qh;
  6184. const int8_t * restrict q8 = y[i].qs;
  6185. const int8_t * restrict scale = x[i].scales;
        const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
        const int8x16_t scales = vld1q_s8(scale);
        const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
        const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
                                                   vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
                                         vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
                                                   vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
        int32_t isum_mins = vaddvq_s32(prod);

        int32_t isum = 0;

        for (int j = 0; j < QK_K/128; ++j) {

            ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
            ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
            ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;

            q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
            q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
            uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
            q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[1], 2);
            q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);

            //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
            //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
            //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
            //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
            q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
            q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
            q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
            q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));

            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];

            scale += 4;

            q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;

            shifted = vshrq_n_u8(qhbits.val[0], 4);
            q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[1], 4);
            q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[0], 6);
            q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
            shifted = vshrq_n_u8(qhbits.val[1], 6);
            q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);

            //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
            //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
            //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
            //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
            q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
            q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
            q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
            q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));

            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
            scale += 4;
        }
        //sum += isum * d_all * y[i].d;
        sum += d_all * y[i].d * (isum - 32 * isum_mins);
    }
    *s = sum;

#elif defined __AVX2__
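    // The 6-bit values are rebuilt as (ql & 0xF) | (two qh bits << 4) and kept unsigned so that
    // _mm256_maddubs_epi16 can be used; the -32 offset is applied afterwards by subtracting the
    // precomputed 32*q8 products (q8s_*), which is equivalent to dotting (q6 - 32) with q8.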
    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m256i m2 = _mm256_set1_epi8(3);
    const __m256i m32s = _mm256_set1_epi8(32);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);

        __m256i sumi = _mm256_setzero_si256();

        int is = 0;

        for (int j = 0; j < QK_K/128; ++j) {

            const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
            const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
            const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
            const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
            is += 4;

            const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
            const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
            const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;

            const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
            const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
            const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
            const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);

            const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
            const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
            const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
            const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);

            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
            const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;

            __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
            __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
            __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
            __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);

            __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
            __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
            __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
            __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);

            p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm256_sub_epi16(p16_3, q8s_3);

            p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
            p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
            p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
            p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);

            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
        }

        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __AVX__
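    // Same scheme as the AVX2 path above, but restricted to 128-bit integer operations so it
    // also runs on AVX-only CPUs (AVX alone does not guarantee 256-bit integer instructions).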
    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m3 = _mm_set1_epi8(3);
    const __m128i m32s = _mm_set1_epi8(32);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
        for (int j = 0; j < QK_K/128; ++j) {

            const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
            const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;

            const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
            const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
            const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
            const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
            const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
            const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);

            const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;

            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);

            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;

            __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
            __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
            __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
            __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
            __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
            __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
            __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
            __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);

            __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
            __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
            __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
            __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
            __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
            __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
            __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);

            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
            p16_7 = _mm_sub_epi16(p16_7, q8s_7);

            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);

            p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
            p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
            p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
            p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);

            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
        }

        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic
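    // RISC-V vector (RVV) intrinsics path: operates on 32-byte groups, widening the products
    // to 16 and then 32 bits before reducing them with vredsum.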
    float sumf = 0;
    for (int i = 0; i < nb; ++i) {

        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        size_t vl;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        int sum_t = 0;
        int is = 0;

        for (int j = 0; j < QK_K/128; ++j) {

            vl = 32;

            // load qh
            vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);

            // load Q6
            vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
            vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);

            vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
            vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
            vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
            vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);

            vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
            vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
            vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
            vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);

            vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
            vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
            vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
            vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);

            vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
            vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
            vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
            vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);

            // load Q8 and take product
            vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
            vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
            vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
            vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);

            vl = 16;

            vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
            vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
            vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
            vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
            vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
            vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
            vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
            vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);

            vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
            vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
            vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
            vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);

            sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);

            q6 += 64; qh += 32; q8 += 128; is=8;
        }

        sumf += d * sum_t;
    }

    *s = sumf;

#else
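    // Scalar reference path: dequantize one super-block into aux8, then accumulate
    // 16-value groups weighted by the per-group 8-bit scales.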
    int8_t aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
                a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
                a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
            }
            a += 128;
            q4 += 64;
            qh += 32;
        }
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}

#else
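// Variant of ggml_vec_dot_q6_K_q8_K used when QK_K != 256 (in practice the QK_K == 64 build):
// a block then holds a single group of 64 quants, so each path below makes one pass over
// ql/qh using the four 16-value scales.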
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q6_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    const int nb = n / QK_K;

#ifdef __ARM_NEON
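    // NEON: the whole 64-quant block is processed in one shot (no inner loop over 128-quant halves).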
    float sum = 0;

    const uint8x16_t m4b = vdupq_n_u8(0xF);
    const int8x16_t  m32s = vdupq_n_s8(32);
    const int32x4_t  vzero = vdupq_n_s32(0);

    const uint8x16_t mone = vdupq_n_u8(3);

    ggml_int8x16x4_t q6bytes;
    ggml_uint8x16x4_t q6h;

    for (int i = 0; i < nb; ++i) {

        const float d_all = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        int32_t isum = 0;

        uint8x16_t qhbits = vld1q_u8(qh);
        ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
        ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);

        q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
        uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
        q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
        shifted = vshrq_n_u8(qhbits, 4);
        q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
        shifted = vshrq_n_u8(qhbits, 6);
        q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);

        q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
        q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
        q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
        q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);

        isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
                vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
                vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
                vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];

        sum += isum * d_all * y[i].d;
    }
    *s = sum;

#elif defined __AVX2__
    const __m256i m4 = _mm256_set1_epi8(0xF);
    const __m256i m2 = _mm256_set1_epi8(3);
    const __m256i m32s = _mm256_set1_epi8(32);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
        const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
        const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
        const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);

        __m256i sumi = _mm256_setzero_si256();

        const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
        const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);

        const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
        const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);

        const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
        const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);

        const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
        const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
        __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);

        __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
        __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);

        p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
        p16_1 = _mm256_sub_epi16(p16_1, q8s_1);

        p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
        p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);

        sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));

        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __AVX__
    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(3);
    const __m128i m32s = _mm_set1_epi8(32);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
        const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
        const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
        const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
        const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);

        const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
        const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);

        const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
        const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
        const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
        const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);

        const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
        const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
        const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
        const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);

        const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
        const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));

        __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
        __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
        __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
        __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));

        __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
        __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
        __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
        __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));

        p16_0 = _mm_sub_epi16(p16_0, q8s_0);
        p16_1 = _mm_sub_epi16(p16_1, q8s_1);
        p16_2 = _mm_sub_epi16(p16_2, q8s_2);
        p16_3 = _mm_sub_epi16(p16_3, q8s_3);

        p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
        p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
        p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
        p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);

        sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
        sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));

        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
    }

    *s = hsum_float_8(acc);

#elif defined __riscv_v_intrinsic
    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const float d_all = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q6 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;

        const int8_t * restrict scale = x[i].scales;

        int32_t isum = 0;

        size_t vl = 16;

        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);

        // load Q6
        vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
        vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);

        // load qh
        vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);

        vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
        qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
        vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
        qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
        vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
        qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
        vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);

        vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
        vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
        vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
        vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);

        vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
        vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
        vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
        vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);

        // load Q8 and take product
        vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
        vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
        vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
        vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);

        vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
        vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
        vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
        vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);

        isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
        isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];

        sumf += isum * d_all * y[i].d;
    }

    *s = sumf;

#else
    int8_t aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t  * restrict q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * restrict a = aux8;
        for (int l = 0; l < 16; ++l) {
            a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
            a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
            a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
            a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
        }
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
#endif
}
#endif

#if defined (__AVX2__) || defined (__ARM_NEON)
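// Lookup table used by the iq2 dot products below: entry k (k = 0..127) expands a 7-bit sign
// index into 8 bytes of +1/-1, with the 8th sign chosen so that the total number of -1 factors
// is even ("k even signs").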
static const int8_t keven_signs_q2xs[1024] = {
     1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
     1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
     1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
     1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
     1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
     1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
     1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
     1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
     1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
     1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
     1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
     1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
     1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
     1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
     1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
     1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
     1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
     1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
     1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
     1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
     1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
     1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
     1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
     1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
     1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
     1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
     1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
     1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
     1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
     1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
     1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
     1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
};
#endif

void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_xxs * restrict x = vx;
    const block_q8_K    * restrict y = vy;

    const int nb = n / QK_K;

#if defined(__ARM_NEON)
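    // iq2_xxs: each 32-value block uses 8 bytes - four one-byte indices into iq2xxs_grid plus a
    // 32-bit word holding four 7-bit sign indices (looked up in keven_signs_q2xs) and a 4-bit
    // block scale in the top bits.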
    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    ggml_int8x16x4_t q2u;
    ggml_int8x16x4_t q2s;
    ggml_int8x16x4_t q8b;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        float sumf1 = 0, sumf2 = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
            q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
            q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
            q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
            q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
            q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
            q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
            q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
            q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
            q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
            q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
            sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
            sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
        }
        sumf += d*(sumf1 + sumf2);
    }
    *s = 0.25f * sumf;

#elif defined(__AVX2__)
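    // AVX2: the grid and sign lookups are gathered with _mm256_set_epi64x and the signs are
    // applied to q8 via _mm256_sign_epi8; the (2*ls+1) scale with the final 0.125f factor is
    // equivalent to the (0.5f + ls) / 0.25f combination used in the NEON path above.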
    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    __m256 accumf = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        __m256i sumi1 = _mm256_setzero_si256();
        __m256i sumi2 = _mm256_setzero_si256();
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
            const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
            const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
            const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
                                                   signs64[(aux32[1] >>  7) & 127], signs64[(aux32[1] >>  0) & 127]);
            const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
                                                   signs64[(aux32[3] >>  7) & 127], signs64[(aux32[3] >>  0) & 127]);
            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
            const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
            const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
            const uint16_t ls1 = aux32[1] >> 28;
            const uint16_t ls2 = aux32[3] >> 28;
            const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
            const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
            sumi1 = _mm256_add_epi32(sumi1, p1);
            sumi2 = _mm256_add_epi32(sumi2, p2);
        }
        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
    }
    *s = 0.125f * hsum_float_8(accumf);

#else
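    // Scalar fallback: expand each group of 8 values via the iq2xxs_grid and ksigns_iq2xs tables
    // and accumulate with the (2*scale + 1) weight.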
    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(aux32, q2, 2*sizeof(uint32_t));
            q2 += 4;
            const uint32_t ls = 2*(aux32[1] >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
                const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}

void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_xs * restrict x = vx;
    const block_q8_K   * restrict y = vy;

    const int nb = n / QK_K;

#if defined(__ARM_NEON)
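    // iq2_xs: each group of 8 values is a single uint16 - the low 9 bits index iq2xs_grid and
    // the high 7 bits index keven_signs_q2xs; 4-bit block scales are packed two per byte in
    // x[i].scales and applied as 2*s+1 (hence the final 0.125f factor).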
    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;

    ggml_int8x16x4_t q2u;
    ggml_int8x16x4_t q2s;
    ggml_int8x16x4_t q8b;

    int32x4x4_t scales32;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;
        const uint8x8_t scales8 = vld1_u8(x[i].scales);
        const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
        const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
        uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
        scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
        const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
        const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
        scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
        scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
        scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
        scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
        int32x4_t sumi = vdupq_n_s32(0);
        for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
            q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
            q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
            q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
            q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
            q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
            q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
            q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
            q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
            q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
            q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
            const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
            const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
            const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
            const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
            const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
            sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
            q2 += 8;
        }
        sumf += d*vaddvq_s32(sumi);
    }
    *s = 0.125f * sumf;

#elif defined(__AVX2__)
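    // AVX2: only 7 of the 8 sign bits per group are stored; the 8th is reconstructed as the
    // parity of the stored bits via the k_bit_helper pshufb table, then the full sign bytes are
    // broadcast and turned into all-ones masks with the shuffle/cmpeq pairs below.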
    const __m256i mone = _mm256_set1_epi8(1);
    static const char block_sign_shuffle_mask_1[32] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    };
    static const char block_sign_shuffle_mask_2[32] = {
        0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
        0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
    };
    static const uint8_t bit_selector_mask_bytes[32] = {
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    };

    const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
    const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
    const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);

#if QK_K == 64
    static const uint8_t k_bit_helper[16] = {
        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
    };
    const __m128i bit_helper = _mm_loadu_si128((const __m128i*)k_bit_helper);
    const __m128i m511 = _mm_set1_epi16(511);
    typedef union {
        __m128i vec_index;
        uint16_t index[8];
    } index_t;

    index_t idx;
    __m256 accumf = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const __m128i q2_data = _mm_loadu_si128((const __m128i*)x[i].qs);
        idx.vec_index = _mm_and_si128(q2_data, m511);

        const __m128i partial_sign_bits = _mm_srli_epi16(q2_data, 9);
        const __m128i partial_sign_bits_upper = _mm_srli_epi16(q2_data, 13);
        const __m128i partial_sign_bits_for_counting = _mm_xor_si128(partial_sign_bits, partial_sign_bits_upper);

        const __m128i odd_bits = _mm_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
        const __m128i full_sign_bits = _mm_or_si128(partial_sign_bits, odd_bits);
        const __m256i full_signs = MM256_SET_M128I(full_sign_bits, full_sign_bits);

        const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)y[i].qs);
        const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)(y[i].qs+32));

        const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[idx.index[3]], iq2xs_grid[idx.index[2]],
                                               iq2xs_grid[idx.index[1]], iq2xs_grid[idx.index[0]]);
        const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[idx.index[7]], iq2xs_grid[idx.index[6]],
                                               iq2xs_grid[idx.index[5]], iq2xs_grid[idx.index[4]]);

        __m256i signs;
        signs = _mm256_shuffle_epi8(full_signs, block_sign_shuffle_1);
        signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
        const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));

        signs = _mm256_shuffle_epi8(full_signs, block_sign_shuffle_2);
        signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
        const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));

        const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
        const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);

        const __m256i sc1 = MM256_SET_M128I(_mm_set1_epi16(2*(x[i].scales[0] >> 4)+1), _mm_set1_epi16(2*(x[i].scales[0] & 0xf)+1));
        const __m256i sc2 = MM256_SET_M128I(_mm_set1_epi16(2*(x[i].scales[1] >> 4)+1), _mm_set1_epi16(2*(x[i].scales[1] & 0xf)+1));

        const __m256i sum = _mm256_add_epi32(_mm256_madd_epi16(sc1, dot1), _mm256_madd_epi16(sc2, dot2));

        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sum), accumf);
    }

    *s = 0.125f * hsum_float_8(accumf);
#else
    static const uint8_t k_bit_helper[32] = {
        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
    };
    const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
    const __m256i m511 = _mm256_set1_epi16(511);
    const __m128i m4 = _mm_set1_epi8(0xf);
    const __m128i m1 = _mm_set1_epi8(1);

    uint64_t aux64;

    // somewhat hacky, but gives a significant boost in performance
    __m256i aux_gindex;
    const uint16_t * gindex = (const uint16_t *)&aux_gindex;

    __m256 accumf = _mm256_setzero_ps();
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const int8_t   * restrict q8 = y[i].qs;

        memcpy(&aux64, x[i].scales, 8);
        __m128i stmp = _mm_set1_epi64x(aux64);
        stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
        const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);

        __m256i sumi1 = _mm256_setzero_si256();
        __m256i sumi2 = _mm256_setzero_si256();

        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {

            const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2);  q2 += 16;
            aux_gindex = _mm256_and_si256(q2_data, m511);

            const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
            const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
            const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);

            const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
            const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);

            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
            const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;

            const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
                                                   iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
            const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
                                                   iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
            const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
                                                   iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
            const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
                                                   iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);

            const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
            const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
            const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l);
            const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h);

            __m256i signs;
            signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));

            signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));

            signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));

            signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
            const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));

            const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
            const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
            const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
            const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);

            const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
            const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
            const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
            const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));

            sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
            sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
            sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
            sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
        }

        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
    }
    *s = 0.125f * hsum_float_8(accumf);
#endif
#else
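    // Scalar fallback for iq2_xs, mirroring the reference dequantization.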
    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * restrict q2 = x[i].qs;
        const uint8_t  * restrict sc = x[i].scales;
        const int8_t   * restrict q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
            const uint16_t ls2 = 2*(sc[ib32] >>  4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls2;
            q2 += 4;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
#endif
}

void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_s * restrict x = vx;
    const block_q8_K  * restrict y = vy;

    const int nb = n / QK_K;

#if defined(__ARM_NEON)
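    // iq2_s: grid indices are 8 bits in qs plus 2 high bits from qh, and the signs are stored
    // explicitly (one bit per value, read 16 at a time as a uint16) instead of being looked up
    // from the parity-constrained keven_signs_q2xs table.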
    static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
    };

    static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};

    const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1);
    const uint8x16_t        mask2 = vld1q_u8(k_mask2);
    const uint8x16_t        m1    = vdupq_n_u8(1);
    const int32x4_t         vzero = vdupq_n_s32(0);

    uint8x16x2_t vs;
    ggml_int8x16x4_t q2s;
    ggml_int8x16x4_t q8b;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;

        const uint8_t * restrict qs = x[i].qs;
        const uint8_t * restrict qh = x[i].qh;
        const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
        const int8_t  * restrict q8 = y[i].qs;

        int sumi1 = 0, sumi2 = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))),
                                     vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300)))));
            q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))),
                                     vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300)))));
            q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))),
                                     vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300)))));
            q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))),
                                     vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300)))));
            qs += 8;

            vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16)));
            vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
            vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
            vs.val[0] = vceqq_u8(vs.val[0], mask2);
            vs.val[1] = vceqq_u8(vs.val[1], mask2);

            q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]);
            q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]);

            vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
            vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
            vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
            vs.val[0] = vceqq_u8(vs.val[0], mask2);
            vs.val[1] = vceqq_u8(vs.val[1], mask2);

            signs += 4;

            q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]);
            q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]);

            const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]);
            const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]);
            const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]);
            const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]);

            sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf));
            sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >>  4));
            sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf));
            sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >>  4));
        }
        sumf += d*(sumi1 + sumi2);
    }

    *s = 0.125f * sumf;

#elif defined(__AVX2__)
  7165. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  7166. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  7167. };
  7168. static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7169. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7170. };
  7171. const __m128i m4 = _mm_set1_epi8(0xf);
  7172. const __m128i m1 = _mm_set1_epi8(1);
  7173. const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
  7174. const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
  7175. uint64_t aux64;
  7176. __m256 accumf = _mm256_setzero_ps();
  7177. for (int i = 0; i < nb; ++i) {
  7178. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7179. const uint8_t * restrict qs = x[i].qs;
  7180. const uint8_t * restrict qh = x[i].qh;
  7181. const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
  7182. const int8_t * restrict q8 = y[i].qs;
  7183. memcpy(&aux64, x[i].scales, 8);
  7184. const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1);
  7185. const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15
  7186. __m256i sumi1 = _mm256_setzero_si256();
  7187. __m256i sumi2 = _mm256_setzero_si256();
  7188. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7189. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7190. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7191. const __m256i q2_1 = _mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)],
  7192. iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)],
  7193. iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)],
  7194. iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]);
  7195. const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)],
  7196. iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)],
  7197. iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)],
  7198. iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]);
  7199. qs += 8;
  7200. __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16));
  7201. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  7202. const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2);
  7203. const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1);
  7204. aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16));
  7205. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  7206. const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2);
  7207. const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2);
  7208. signs += 4;
  7209. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1
  7210. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3
  7211. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0)));
  7212. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1)));
  7213. sumi1 = _mm256_add_epi32(sumi1, p1);
  7214. sumi2 = _mm256_add_epi32(sumi2, p2);
  7215. }
  7216. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7217. }
  7218. *s = 0.125f * hsum_float_8(accumf);
  7219. #else
  7220. float sumf = 0;
  7221. for (int i = 0; i < nb; i++) {
  7222. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7223. const int8_t * q8 = y[i].qs;
  7224. const uint8_t * qs = x[i].qs;
  7225. const uint8_t * qh = x[i].qh;
  7226. const uint8_t * signs = qs + QK_K/8;
  7227. int bsum = 0;
  7228. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  7229. int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
  7230. int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
  7231. int sumi1 = 0, sumi2 = 0;
  7232. for (int l = 0; l < 2; ++l) {
  7233. const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
  7234. for (int j = 0; j < 8; ++j) {
  7235. sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
  7236. }
  7237. q8 += 8;
  7238. }
  7239. for (int l = 2; l < 4; ++l) {
  7240. const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
  7241. for (int j = 0; j < 8; ++j) {
  7242. sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
  7243. }
  7244. q8 += 8;
  7245. }
  7246. bsum += ls1 * sumi1 + ls2 * sumi2;
  7247. qs += 4;
  7248. signs += 4;
  7249. }
  7250. sumf += d * bsum;
  7251. }
  7252. *s = 0.125f * sumf;
  7253. #endif
  7254. }
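// Dot product of an IQ3_XXS row against a Q8_K row.
// qs holds 8-bit indices into iq3xxs_grid (four byte values per entry); the aux32 words at qs + QK_K/4 pack
// four 7-bit sign-pattern indices (ksigns_iq2xs) in the low 28 bits and a 4-bit scale in the top 4 bits.
// Scales are applied as (2*scale + 1) and the result carries a common 0.25f factor.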
  7255. void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7256. assert(n % QK_K == 0);
  7257. assert(nrc == 1);
  7258. UNUSED(nrc);
  7259. UNUSED(bx);
  7260. UNUSED(by);
  7261. UNUSED(bs);
  7262. const block_iq3_xxs * restrict x = vx;
  7263. const block_q8_K * restrict y = vy;
  7264. const int nb = n / QK_K;
  7265. #if defined(__ARM_NEON)
  7266. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7267. uint32_t aux32[2];
  7268. ggml_int8x16x4_t q3s;
  7269. ggml_int8x16x4_t q8b;
  7270. float sumf = 0;
  7271. for (int i = 0; i < nb; ++i) {
  7272. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7273. const uint8_t * restrict q3 = x[i].qs;
  7274. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  7275. const int8_t * restrict q8 = y[i].qs;
  7276. float sumf1 = 0, sumf2 = 0;
  7277. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7278. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7279. memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
  7280. const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]);
  7281. const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]);
  7282. const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]);
  7283. const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]);
  7284. q3 += 16;
  7285. q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127))));
  7286. q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
  7287. q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  7288. q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  7289. q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
  7290. q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
  7291. q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
  7292. q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
  7293. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  7294. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  7295. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
  7296. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
  7297. }
  7298. sumf += d*(sumf1 + sumf2);
  7299. }
  7300. *s = 0.5f * sumf;
  7301. #elif defined(__AVX2__)
  7302. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7303. uint32_t aux32[2];
  7304. __m256 accumf = _mm256_setzero_ps();
  7305. for (int i = 0; i < nb; ++i) {
  7306. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7307. const uint8_t * restrict q3 = x[i].qs;
  7308. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  7309. const int8_t * restrict q8 = y[i].qs;
  7310. __m256i sumi1 = _mm256_setzero_si256();
  7311. __m256i sumi2 = _mm256_setzero_si256();
  7312. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7313. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7314. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7315. const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  7316. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  7317. q3 += 8;
  7318. const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  7319. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  7320. q3 += 8;
  7321. memcpy(aux32, gas, 8); gas += 8;
  7322. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127],
  7323. signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]);
  7324. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  7325. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  7326. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  7327. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  7328. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7329. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7330. const uint16_t ls1 = aux32[0] >> 28;
  7331. const uint16_t ls2 = aux32[1] >> 28;
  7332. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  7333. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  7334. sumi1 = _mm256_add_epi32(sumi1, p1);
  7335. sumi2 = _mm256_add_epi32(sumi2, p2);
  7336. }
  7337. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7338. }
  7339. *s = 0.25f * hsum_float_8(accumf);
  7340. #else
  7341. uint32_t aux32;
  7342. float sumf = 0.f;
  7343. for (int i = 0; i < nb; ++i) {
  7344. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7345. const uint8_t * restrict q3 = x[i].qs;
  7346. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  7347. const int8_t * restrict q8 = y[i].qs;
  7348. int32_t bsum = 0;
  7349. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  7350. memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
  7351. const uint32_t ls = 2*(aux32 >> 28) + 1;
  7352. int32_t sumi = 0;
  7353. for (int l = 0; l < 4; ++l) {
  7354. const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
  7355. const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
  7356. const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
  7357. for (int j = 0; j < 4; ++j) {
  7358. sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
  7359. sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
  7360. }
  7361. q8 += 8;
  7362. }
  7363. q3 += 8;
  7364. bsum += sumi * ls;
  7365. }
  7366. sumf += d * bsum;
  7367. }
  7368. *s = 0.25f * sumf;
  7369. #endif
  7370. }
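// Dot product of an IQ3_S row against a Q8_K row.
// qs holds 8-bit indices into iq3s_grid with a 9th bit taken from qh; sign bits are stored explicitly in
// x[i].signs (one byte per 8 values) and 4-bit scales are applied as (2*scale + 1), as in the scalar path below.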
  7371. void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7372. assert(n % QK_K == 0);
  7373. assert(nrc == 1);
  7374. UNUSED(nrc);
  7375. UNUSED(bx);
  7376. UNUSED(by);
  7377. UNUSED(bs);
  7378. const block_iq3_s * restrict x = vx;
  7379. const block_q8_K * restrict y = vy;
  7380. const int nb = n / QK_K;
  7381. #if defined(__ARM_NEON)
  7382. typedef union {
  7383. uint16x8_t vec_index;
  7384. uint16_t index[8];
  7385. } vec_index_t;
  7386. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  7387. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  7388. };
  7389. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  7390. static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1};
  7391. const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1);
  7392. const uint8x16_t mask2 = vld1q_u8(k_mask2);
  7393. const int16x8_t hshift = vld1q_s16(k_shift);
  7394. const uint16x8_t m256 = vdupq_n_u16(256);
  7395. const uint8x16_t m1 = vdupq_n_u8(1);
  7396. uint8x16x2_t vs;
  7397. ggml_int8x16x4_t q3s;
  7398. ggml_int8x16x4_t q8b;
  7399. vec_index_t idx;
  7400. #if QK_K == 256
  7401. uint32_t scales32[2];
  7402. const uint8_t * scales8 = (const uint8_t *)scales32;
  7403. #endif
  7404. float sumf = 0;
  7405. for (int i = 0; i < nb; ++i) {
  7406. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7407. const uint8_t * restrict qs = x[i].qs;
  7408. const uint8_t * restrict qh = x[i].qh;
  7409. const uint16_t * restrict signs = (const uint16_t *)x[i].signs;
  7410. const int8_t * restrict q8 = y[i].qs;
  7411. #if QK_K == 256
  7412. memcpy(scales32, x[i].scales, 4);
  7413. scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101;
  7414. scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101;
  7415. #endif
  7416. int sumi1 = 0, sumi2 = 0;
  7417. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7418. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7419. const uint8x16_t idx_l = vld1q_u8(qs); qs += 16;
  7420. idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256));
  7421. const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]],
  7422. iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]);
  7423. const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]],
  7424. iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]);
  7425. idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256));
  7426. const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]],
  7427. iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]);
  7428. const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]],
  7429. iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]);
  7430. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16)));
  7431. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  7432. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  7433. vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
  7434. vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
  7435. q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0));
  7436. q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1));
  7437. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
  7438. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  7439. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  7440. vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
  7441. vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
  7442. signs += 4;
  7443. q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2));
  7444. q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3));
  7445. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  7446. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  7447. #if QK_K == 256
  7448. sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0];
  7449. sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4];
  7450. #else
  7451. sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32/2] & 0xf));
  7452. sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32/2] >> 4));
  7453. #endif
  7454. }
  7455. sumf += d*(sumi1 + sumi2);
  7456. }
  7457. *s = sumf;
  7458. #elif defined(__AVX2__)
  7459. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  7460. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  7461. };
  7462. static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7463. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7464. };
  7465. const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
  7466. const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
  7467. const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8);
  7468. const __m256i idx_mask = _mm256_set1_epi32(256);
  7469. typedef union {
  7470. __m256i vec[2];
  7471. uint32_t index[16];
  7472. } index_t;
  7473. index_t idx;
  7474. __m256 accumf = _mm256_setzero_ps();
  7475. for (int i = 0; i < nb; ++i) {
  7476. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7477. const uint8_t * restrict qs = x[i].qs;
  7478. const uint8_t * restrict qh = x[i].qh;
  7479. const uint16_t * restrict signs = (const uint16_t *)x[i].signs;
  7480. const int8_t * restrict q8 = y[i].qs;
  7481. __m256i sumi1 = _mm256_setzero_si256();
  7482. __m256i sumi2 = _mm256_setzero_si256();
  7483. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7484. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7485. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7486. const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16;
  7487. idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]);
  7488. idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]);
  7489. idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask);
  7490. idx.vec[1] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask);
  7491. idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l)));
  7492. idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1)));
7493. // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange.
  7494. //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4);
  7495. //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4);
  7496. const __m256i q2_1 = _mm256_set_epi32(
  7497. iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]],
  7498. iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]
  7499. );
  7500. const __m256i q2_2 = _mm256_set_epi32(
  7501. iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]],
  7502. iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]]
  7503. );
  7504. __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16));
  7505. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  7506. const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2);
  7507. const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1);
  7508. aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16));
  7509. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  7510. const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2);
  7511. const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2);
  7512. signs += 4;
  7513. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7514. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7515. const uint16_t ls1 = x[i].scales[ib32/2] & 0xf;
  7516. const uint16_t ls2 = x[i].scales[ib32/2] >> 4;
  7517. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  7518. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  7519. sumi1 = _mm256_add_epi32(sumi1, p1);
  7520. sumi2 = _mm256_add_epi32(sumi2, p2);
  7521. }
  7522. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7523. }
  7524. *s = hsum_float_8(accumf);
  7525. #else
  7526. float sumf = 0.f;
  7527. for (int i = 0; i < nb; ++i) {
  7528. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7529. const uint8_t * restrict qs = x[i].qs;
  7530. const uint8_t * restrict qh = x[i].qh;
  7531. const uint8_t * restrict signs = x[i].signs;
  7532. const int8_t * restrict q8 = y[i].qs;
  7533. int32_t bsum = 0;
  7534. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7535. const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
  7536. const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
  7537. int32_t sumi = 0;
  7538. for (int l = 0; l < 4; ++l) {
  7539. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
  7540. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
  7541. for (int j = 0; j < 4; ++j) {
  7542. sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
  7543. sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
  7544. }
  7545. q8 += 8;
  7546. }
  7547. qs += 8;
  7548. signs += 4;
  7549. bsum += sumi * ls1;
  7550. sumi = 0;
  7551. for (int l = 0; l < 4; ++l) {
  7552. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
  7553. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
  7554. for (int j = 0; j < 4; ++j) {
  7555. sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
  7556. sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
  7557. }
  7558. q8 += 8;
  7559. }
  7560. qs += 8;
  7561. signs += 4;
  7562. bsum += sumi * ls2;
  7563. }
  7564. sumf += d * bsum;
  7565. }
  7566. *s = sumf;
  7567. #endif
  7568. }
  7569. #ifdef __AVX2__
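// Multiply two vectors of signed 8-bit integers and horizontally add adjacent pairs into 16-bit lanes.
// _mm256_maddubs_epi16 requires an unsigned first operand, so the sign of x is first moved onto y
// (ax = |x|, sy = sign(x)*y), which leaves the products x*y unchanged.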
  7570. static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) {
  7571. const __m256i ax = _mm256_sign_epi8(x, x);
  7572. const __m256i sy = _mm256_sign_epi8(y, x);
  7573. return _mm256_maddubs_epi16(ax, sy);
  7574. }
  7575. #endif
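// Dot product of an IQ1_S row against a Q8_K row.
// Each 8-bit index in qs gets 3 extra high bits from qh; bits 12..14 of qh give the block scale (used as
// 2*scale + 1) and bit 15 the sign of the IQ1S_DELTA correction, which is applied through the Q8_K block
// sums (bsums) rather than per element.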
  7576. void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7577. assert(n % QK_K == 0);
  7578. assert(nrc == 1);
  7579. UNUSED(nrc);
  7580. UNUSED(bx);
  7581. UNUSED(by);
  7582. UNUSED(bs);
  7583. const block_iq1_s * restrict x = vx;
  7584. const block_q8_K * restrict y = vy;
  7585. const int nb = n / QK_K;
  7586. #if defined __ARM_NEON
  7587. ggml_int8x16x4_t q1b;
  7588. ggml_int8x16x4_t q8b;
  7589. float sumf = 0;
  7590. for (int i = 0; i < nb; ++i) {
  7591. const int8_t * q8 = y[i].qs;
  7592. const uint8_t * qs = x[i].qs;
  7593. const uint16_t * qh = x[i].qh;
  7594. int sumi1 = 0, sumi2 = 0, sumi3 = 0;
  7595. for (int ib = 0; ib < QK_K/32; ib += 2) {
  7596. q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))),
  7597. vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700)))));
  7598. q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))),
  7599. vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700)))));
  7600. q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))),
  7601. vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700)))));
  7602. q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))),
  7603. vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700)))));
  7604. qs += 8;
  7605. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7606. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]);
  7607. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]);
  7608. const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1;
  7609. const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1;
  7610. sumi1 += vaddvq_s32(p1) * ls1;
  7611. sumi2 += vaddvq_s32(p2) * ls2;
  7612. sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1)
  7613. + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? -1 : 1);
  7614. }
  7615. sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3);
  7616. }
  7617. *s = sumf;
  7618. #elif defined __AVX2__
  7619. __m256 accum = _mm256_setzero_ps();
  7620. float accum1 = 0;
  7621. for (int i = 0; i < nb; ++i) {
  7622. const int8_t * q8 = y[i].qs;
  7623. const uint8_t * qs = x[i].qs;
  7624. const uint16_t * qh = x[i].qh;
  7625. __m256i sumi = _mm256_setzero_si256();
  7626. int sumi1 = 0;
  7627. for (int ib = 0; ib < QK_K/32; ib += 2) {
  7628. const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)],
  7629. iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]);
  7630. const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)],
  7631. iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]);
  7632. qs += 8;
  7633. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7634. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7635. const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1);
  7636. const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2);
  7637. const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1;
  7638. const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1;
  7639. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1));
  7640. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2));
  7641. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2));
  7642. sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1
  7643. + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2;
  7644. }
  7645. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7646. accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum);
  7647. accum1 += d * sumi1;
  7648. }
  7649. *s = hsum_float_8(accum) + IQ1S_DELTA * accum1;
  7650. #else
  7651. float sumf = 0;
  7652. for (int i = 0; i < nb; i++) {
  7653. const int8_t * q8 = y[i].qs;
  7654. const uint8_t * qs = x[i].qs;
  7655. const uint16_t * qh = x[i].qh;
  7656. int sumi = 0, sumi1 = 0;
  7657. for (int ib = 0; ib < QK_K/32; ++ib) {
  7658. const int ls = 2*((qh[ib] >> 12) & 7) + 1;
  7659. const int delta = qh[ib] & 0x8000 ? -1 : 1;
  7660. int lsum = 0;
  7661. for (int l = 0; l < 4; ++l) {
  7662. const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
  7663. for (int j = 0; j < 8; ++j) {
  7664. lsum += q8[j] * grid[j];
  7665. }
  7666. q8 += 8;
  7667. }
  7668. sumi += ls * lsum;
  7669. sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
  7670. qs += 4;
  7671. }
  7672. sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
  7673. }
  7674. *s = sumf;
  7675. #endif
  7676. }
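// Dot product of an IQ4_NL row against a Q8_0 row: each 4-bit quant is mapped through the non-linear
// kvalues_iq4nl table before being multiplied with the matching int8 value; both blocks carry fp16 scales.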
  7677. void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7678. assert(nrc == 1);
  7679. UNUSED(nrc);
  7680. UNUSED(bx);
  7681. UNUSED(by);
  7682. UNUSED(bs);
  7683. assert(n % QK4_NL == 0);
  7684. static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");
  7685. const block_iq4_nl * restrict x = vx;
  7686. const block_q8_0 * restrict y = vy;
  7687. const int nb = n / QK4_NL;
  7688. #if defined __ARM_NEON
  7689. const int8x16_t values = vld1q_s8(kvalues_iq4nl);
  7690. const uint8x16_t m4b = vdupq_n_u8(0x0f);
  7691. uint8x16x2_t q4bits;
  7692. int8x16x4_t q4b;
  7693. int8x16x4_t q8b;
  7694. int32x4_t prod_1, prod_2;
  7695. float sumf = 0;
  7696. for (int ib = 0; ib < nb; ib += 2) {
  7697. q4bits.val[0] = vld1q_u8(x[ib+0].qs);
  7698. q4bits.val[1] = vld1q_u8(x[ib+1].qs);
  7699. q8b.val[0] = vld1q_s8(y[ib+0].qs);
  7700. q8b.val[1] = vld1q_s8(y[ib+0].qs + 16);
  7701. q8b.val[2] = vld1q_s8(y[ib+1].qs);
  7702. q8b.val[3] = vld1q_s8(y[ib+1].qs + 16);
  7703. q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
  7704. q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
  7705. q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
  7706. q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
  7707. prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
  7708. prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
  7709. sumf +=
  7710. GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib+0].d) * vaddvq_s32(prod_1) +
  7711. GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib+1].d) * vaddvq_s32(prod_2);
  7712. }
  7713. *s = sumf;
  7714. #elif defined __AVX2__
  7715. const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl);
  7716. const __m128i m4b = _mm_set1_epi8(0x0f);
  7717. const __m256i mone = _mm256_set1_epi16(1);
  7718. __m256 accum1 = _mm256_setzero_ps();
  7719. __m256 accum2 = _mm256_setzero_ps();
  7720. for (int ib = 0; ib < nb; ib += 2) {
  7721. const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[0].qs);
  7722. const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[1].qs);
  7723. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[0].qs);
  7724. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[1].qs);
  7725. const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)),
  7726. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)));
  7727. const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)),
  7728. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)));
  7729. const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1);
  7730. const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2);
  7731. const __m256i p_1 = _mm256_madd_epi16(p16_1, mone);
  7732. const __m256i p_2 = _mm256_madd_epi16(p16_2, mone);
  7733. accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[0].d)*GGML_FP16_TO_FP32(x[0].d)),
  7734. _mm256_cvtepi32_ps(p_1), accum1);
  7735. accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[1].d)*GGML_FP16_TO_FP32(x[1].d)),
  7736. _mm256_cvtepi32_ps(p_2), accum2);
  7737. y += 2;
  7738. x += 2;
  7739. }
  7740. *s = hsum_float_8(_mm256_add_ps(accum1, accum2));
  7741. #else
  7742. float sumf = 0;
  7743. for (int ib = 0; ib < nb; ++ib) {
  7744. const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d);
  7745. int sumi1 = 0, sumi2 = 0;
  7746. for (int j = 0; j < QK4_NL/2; ++j) {
  7747. sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
  7748. sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
  7749. }
  7750. sumf += d * (sumi1 + sumi2);
  7751. }
  7752. *s = sumf;
  7753. #endif
  7754. }
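// Dot product of an IQ4_XS row against a Q8_K row. Same kvalues_iq4nl lookup as IQ4_NL, but with one
// 6-bit scale per 32 values (low 4 bits in scales_l, high 2 bits in scales_h), stored with a bias of 32.
// For QK_K == 64 the call simply forwards to ggml_vec_dot_iq4_nl_q8_0.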
  7755. void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7756. assert(nrc == 1);
  7757. UNUSED(nrc);
  7758. UNUSED(bx);
  7759. UNUSED(by);
  7760. UNUSED(bs);
  7761. assert(n % QK_K == 0);
  7762. #if QK_K == 64
  7763. ggml_vec_dot_iq4_nl_q8_0(n, s, bs, vx, bx, vy, by, nrc);
  7764. #else
  7765. const block_iq4_xs * restrict x = vx;
  7766. const block_q8_K * restrict y = vy;
  7767. const int nb = n / QK_K;
  7768. #if defined __ARM_NEON
  7769. const int8x16_t values = vld1q_s8(kvalues_iq4nl);
  7770. const uint8x16_t m4b = vdupq_n_u8(0x0f);
  7771. ggml_uint8x16x2_t q4bits;
  7772. ggml_int8x16x4_t q4b;
  7773. ggml_int8x16x4_t q8b;
  7774. int32x4_t prod_1, prod_2;
  7775. float sumf = 0;
  7776. for (int ibl = 0; ibl < nb; ++ibl) {
  7777. const int8_t * q8 = y[ibl].qs;
  7778. const uint8_t * q4 = x[ibl].qs;
  7779. uint16_t h = x[ibl].scales_h;
  7780. int sumi1 = 0, sumi2 = 0;
  7781. for (int ib = 0; ib < QK_K/64; ++ib) {
  7782. q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
  7783. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7784. q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
  7785. q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
  7786. q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
  7787. q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
  7788. prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
  7789. prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
  7790. int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32;
  7791. int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32;
  7792. h >>= 4;
  7793. sumi1 += vaddvq_s32(prod_1) * ls1;
  7794. sumi2 += vaddvq_s32(prod_2) * ls2;
  7795. }
  7796. sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2);
  7797. }
  7798. *s = sumf;
  7799. #elif defined __AVX2__
  7800. const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl);
  7801. const __m128i m4b = _mm_set1_epi8(0x0f);
  7802. __m256 accum = _mm256_setzero_ps();
  7803. for (int ibl = 0; ibl < nb; ++ibl) {
  7804. const uint8_t * qs = x[ibl].qs;
  7805. const int8_t * q8 = y[ibl].qs;
  7806. uint16_t sh = x[ibl].scales_h;
  7807. __m256i sumi1 = _mm256_setzero_si256();
  7808. __m256i sumi2 = _mm256_setzero_si256();
  7809. for (int ib = 0; ib < QK_K/32; ib += 2) {
  7810. const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16;
  7811. const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16;
  7812. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7813. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7814. const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)),
  7815. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)));
  7816. const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)),
  7817. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)));
  7818. const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1);
  7819. const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2);
  7820. const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32;
  7821. const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32;
  7822. sh >>= 4;
  7823. const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1));
  7824. const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2));
  7825. sumi1 = _mm256_add_epi32(p_1, sumi1);
  7826. sumi2 = _mm256_add_epi32(p_2, sumi2);
  7827. }
  7828. accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d),
  7829. _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum);
  7830. }
  7831. *s = hsum_float_8(accum);
  7832. #else
  7833. float sumf = 0;
  7834. for (int ibl = 0; ibl < nb; ++ibl) {
  7835. const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
  7836. uint16_t h = x[ibl].scales_h;
  7837. const uint8_t * qs = x[ibl].qs;
  7838. const int8_t * q8 = y[ibl].qs;
  7839. for (int ib = 0; ib < QK_K/32; ib += 2) {
  7840. const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
  7841. const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
  7842. h >>= 4;
  7843. const float d1 = d4d8*(ls1 - 32);
  7844. const float d2 = d4d8*(ls2 - 32);
  7845. int sumi1 = 0, sumi2 = 0;
  7846. for (int j = 0; j < 16; ++j) {
  7847. sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
  7848. sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
  7849. }
  7850. sumf += d1 * (sumi1 + sumi2);
  7851. qs += 16;
  7852. q8 += 32;
  7853. sumi1 = sumi2 = 0;
  7854. for (int j = 0; j < 16; ++j) {
  7855. sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
  7856. sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
  7857. }
  7858. sumf += d2 * (sumi1 + sumi2);
  7859. qs += 16;
  7860. q8 += 32;
  7861. }
  7862. }
  7863. *s = sumf;
  7864. #endif
  7865. #endif
  7866. }
  7867. // ================================ IQ2 quantization =============================================
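// Lazily initialized per-type lookup data used by the IQ2/IQ1 quantization routines:
// the codebook grid itself, a map from packed points to grid indices, and precomputed
// neighbour lists used to find nearby grid points when a value does not lie on the grid.
// One entry per type (IQ2_XXS, IQ2_XS, IQ1_S, IQ2_S), selected by iq2_data_index().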
  7868. typedef struct {
  7869. uint64_t * grid;
  7870. int * map;
  7871. uint16_t * neighbours;
  7872. } iq2_entry_t;
  7873. static iq2_entry_t iq2_data[4] = {
  7874. {NULL, NULL, NULL},
  7875. {NULL, NULL, NULL},
  7876. {NULL, NULL, NULL},
  7877. {NULL, NULL, NULL},
  7878. };
  7879. static inline int iq2_data_index(enum ggml_type type) {
  7880. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ2_S);
  7881. return type == GGML_TYPE_IQ2_XXS ? 0 :
  7882. type == GGML_TYPE_IQ2_XS ? 1 :
  7883. type == GGML_TYPE_IQ1_S ? 2 : 3;
  7884. }
  7885. static inline int iq2_grid_size(enum ggml_type type) {
  7886. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ2_S);
  7887. return type == GGML_TYPE_IQ2_XXS ? 256 :
  7888. type == GGML_TYPE_IQ2_XS ? 512 :
  7889. type == GGML_TYPE_IQ1_S ? NGRID_IQ1S : 1024;
  7890. }
  7891. static int iq2_compare_func(const void * left, const void * right) {
  7892. const int * l = (const int *)left;
  7893. const int * r = (const int *)right;
  7894. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  7895. }
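// One-time initialization of the grid/map/neighbour tables for the given type.
// The kgrid_* arrays below are the hard-coded codebooks: 256 entries for IQ2_XXS, 512 for IQ2_XS,
// NGRID_IQ1S for IQ1_S and 1024 for IQ2_S (see iq2_grid_size()).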
  7896. void iq2xs_init_impl(enum ggml_type type) {
  7897. const int gindex = iq2_data_index(type);
  7898. const int grid_size = iq2_grid_size(type);
  7899. if (iq2_data[gindex].grid) {
  7900. return;
  7901. }
  7902. static const uint16_t kgrid_2bit_256[256] = {
  7903. 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97,
  7904. 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642,
  7905. 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288,
  7906. 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113,
  7907. 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240,
  7908. 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400,
  7909. 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260,
  7910. 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872,
  7911. 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
  7912. 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
  7913. 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
  7914. 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
  7915. 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
  7916. 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
  7917. 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
  7918. 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
  7919. };
  7920. static const uint16_t kgrid_2bit_512[512] = {
  7921. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  7922. 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257,
  7923. 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340,
  7924. 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597,
  7925. 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096,
  7926. 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348,
  7927. 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065,
  7928. 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441,
  7929. 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160,
  7930. 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372,
  7931. 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125,
  7932. 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652,
  7933. 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197,
  7934. 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549,
  7935. 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894,
  7936. 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
  7937. 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
  7938. 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
  7939. 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
  7940. 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
  7941. 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
  7942. 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
  7943. 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
  7944. 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
  7945. 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
  7946. 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
  7947. 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
  7948. 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
  7949. 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
  7950. 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
  7951. 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
  7952. 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
  7953. };
  7954. static const uint16_t kgrid_1bit_2048[NGRID_IQ1S] = {
  7955. 0, 2, 5, 8, 10, 17, 21, 32, 34, 40, 42, 69, 81, 84, 86, 101,
  7956. 128, 130, 136, 138, 149, 160, 162, 168, 170, 260, 261, 273, 276, 278, 281, 282,
  7957. 293, 321, 326, 329, 338, 341, 346, 353, 356, 358, 360, 389, 401, 404, 406, 421,
  7958. 512, 514, 520, 522, 533, 544, 546, 552, 554, 581, 593, 601, 612, 617, 640, 642,
  7959. 648, 650, 657, 661, 665, 672, 674, 680, 682, 1041, 1044, 1046, 1061, 1089, 1097, 1109,
  7960. 1114, 1124, 1125, 1169, 1177, 1189, 1281, 1284, 1285, 1286, 1301, 1304, 1306, 1321, 1344, 1349,
  7961. 1354, 1360, 1361, 1364, 1365, 1366, 1369, 1376, 1378, 1381, 1384, 1386, 1409, 1425, 1429, 1432,
  7962. 1434, 1441, 1444, 1445, 1446, 1449, 1556, 1561, 1601, 1604, 1616, 1618, 1621, 1624, 1632, 1633,
  7963. 1638, 1641, 1669, 1681, 1684, 1689, 2048, 2050, 2056, 2058, 2069, 2080, 2082, 2088, 2090, 2117,
  7964. 2129, 2134, 2149, 2176, 2178, 2184, 2186, 2197, 2208, 2210, 2216, 2218, 2309, 2321, 2324, 2329,
  7965. 2340, 2341, 2369, 2384, 2385, 2389, 2401, 2404, 2409, 2449, 2452, 2454, 2457, 2469, 2560, 2562,
  7966. 2568, 2570, 2581, 2592, 2594, 2600, 2602, 2629, 2641, 2649, 2657, 2661, 2688, 2690, 2693, 2696,
  7967. 2698, 2709, 2720, 2722, 2728, 2730, 4112, 4113, 4116, 4121, 4132, 4133, 4161, 4164, 4176, 4181,
  7968. 4184, 4193, 4196, 4197, 4201, 4241, 4244, 4246, 4257, 4261, 4353, 4356, 4358, 4361, 4368, 4370,
  7969. 4373, 4376, 4385, 4388, 4393, 4421, 4426, 4432, 4433, 4434, 4436, 4437, 4438, 4441, 4448, 4453,
  7970. 4484, 4498, 4501, 4513, 4516, 4625, 4628, 4630, 4645, 4672, 4678, 4681, 4690, 4693, 4696, 4698,
  7971. 4708, 4710, 4741, 4753, 4756, 4758, 4773, 5121, 5126, 5129, 5140, 5141, 5144, 5145, 5153, 5158,
  7972. 5185, 5189, 5190, 5192, 5194, 5201, 5204, 5205, 5206, 5209, 5218, 5221, 5224, 5252, 5257, 5264,
  7973. 5268, 5269, 5272, 5273, 5274, 5281, 5284, 5285, 5289, 5378, 5381, 5386, 5393, 5396, 5397, 5398,
  7974. 5401, 5408, 5410, 5413, 5416, 5418, 5441, 5444, 5445, 5446, 5457, 5458, 5460, 5461, 5462, 5465,
  7975. 5466, 5473, 5476, 5477, 5478, 5481, 5504, 5506, 5508, 5509, 5512, 5514, 5520, 5521, 5524, 5525,
  7976. 5526, 5529, 5530, 5536, 5538, 5541, 5633, 5636, 5637, 5638, 5653, 5654, 5656, 5658, 5665, 5670,
  7977. 5696, 5698, 5700, 5701, 5704, 5706, 5713, 5717, 5718, 5720, 5721, 5729, 5732, 5733, 5736, 5737,
  7978. 5738, 5766, 5770, 5778, 5781, 5796, 5801, 6161, 6166, 6181, 6209, 6212, 6214, 6217, 6224, 6229,
  7979. 6232, 6234, 6240, 6241, 6244, 6246, 6249, 6277, 6289, 6292, 6309, 6416, 6418, 6421, 6426, 6433,
  7980. 6437, 6466, 6468, 6469, 6472, 6481, 6484, 6485, 6486, 6489, 6490, 6496, 6501, 6506, 6537, 6545,
  7981. 6546, 6549, 6552, 6561, 6566, 6569, 6665, 6678, 6692, 6694, 6724, 6726, 6729, 6736, 6738, 6741,
  7982. 6744, 6753, 6758, 6761, 6789, 6801, 6806, 6810, 8192, 8194, 8200, 8202, 8213, 8224, 8226, 8229,
  7983. 8232, 8234, 8261, 8273, 8281, 8289, 8293, 8320, 8322, 8328, 8330, 8341, 8352, 8354, 8357, 8360,
  7984. 8362, 8453, 8465, 8468, 8473, 8485, 8514, 8516, 8521, 8533, 8536, 8538, 8545, 8548, 8549, 8550,
  7985. 8581, 8592, 8598, 8601, 8613, 8705, 8712, 8714, 8721, 8725, 8736, 8738, 8744, 8746, 8773, 8785,
  7986. 8790, 8793, 8805, 8833, 8840, 8842, 8849, 8853, 8864, 8866, 8872, 8874, 9221, 9236, 9238, 9241,
  7987. 9253, 9284, 9285, 9286, 9289, 9298, 9301, 9304, 9306, 9318, 9349, 9361, 9364, 9369, 9377, 9381,
  7988. 9481, 9493, 9505, 9513, 9536, 9541, 9544, 9553, 9556, 9557, 9561, 9570, 9573, 9576, 9609, 9616,
  7989. 9620, 9621, 9624, 9626, 9633, 9636, 9638, 9641, 9733, 9744, 9746, 9753, 9765, 9793, 9801, 9813,
  7990. 9824, 9825, 9833, 9860, 9862, 9872, 9882, 10240, 10242, 10248, 10250, 10261, 10272, 10274, 10280, 10282,
  7991. 10309, 10321, 10324, 10341, 10368, 10370, 10376, 10378, 10400, 10402, 10408, 10410, 10505, 10513, 10516, 10521,
  7992. 10533, 10566, 10569, 10578, 10581, 10593, 10596, 10598, 10601, 10629, 10640, 10646, 10649, 10660, 10661, 10752,
  7993. 10754, 10760, 10762, 10784, 10786, 10792, 10794, 10821, 10833, 10838, 10841, 10853, 10880, 10882, 10888, 10890,
  7994. 10901, 10912, 10914, 10920, 10922, 16389, 16401, 16406, 16421, 16457, 16466, 16469, 16472, 16474, 16481, 16484,
  7995. 16486, 16532, 16537, 16545, 16550, 16640, 16641, 16644, 16646, 16649, 16658, 16661, 16662, 16664, 16666, 16673,
  7996. 16678, 16681, 16709, 16712, 16714, 16721, 16724, 16725, 16726, 16729, 16730, 16741, 16744, 16746, 16769, 16772,
  7997. 16774, 16784, 16786, 16789, 16800, 16801, 16802, 16901, 16913, 16916, 16918, 16933, 16961, 16978, 16981, 16986,
  7998. 16996, 17001, 17033, 17044, 17061, 17409, 17429, 17433, 17449, 17477, 17480, 17482, 17489, 17492, 17493, 17494,
  7999. 17505, 17506, 17509, 17512, 17514, 17537, 17542, 17545, 17552, 17554, 17557, 17568, 17569, 17577, 17665, 17666,
  8000. 17669, 17674, 17681, 17684, 17685, 17686, 17689, 17696, 17701, 17706, 17729, 17732, 17733, 17734, 17737, 17744,
  8001. 17745, 17748, 17749, 17750, 17752, 17753, 17761, 17764, 17765, 17766, 17769, 17794, 17796, 17797, 17800, 17809,
  8002. 17812, 17813, 17814, 17817, 17818, 17829, 17832, 17834, 17921, 17925, 17929, 17940, 17941, 17944, 17946, 17953,
  8003. 17956, 17961, 17984, 17986, 17989, 17992, 18000, 18001, 18002, 18005, 18006, 18009, 18018, 18021, 18024, 18049,
  8004. 18053, 18058, 18068, 18069, 18081, 18084, 18086, 18437, 18449, 18453, 18458, 18469, 18498, 18505, 18512, 18517,
  8005. 18520, 18529, 18532, 18534, 18537, 18565, 18577, 18580, 18582, 18585, 18597, 18689, 18693, 18694, 18698, 18704,
  8006. 18708, 18709, 18712, 18721, 18724, 18726, 18752, 18757, 18762, 18769, 18770, 18772, 18773, 18774, 18777, 18784,
  8007. 18786, 18789, 18790, 18794, 18822, 18825, 18834, 18837, 18838, 18840, 18849, 18852, 18854, 18857, 18966, 19012,
  8008. 19014, 19017, 19029, 19032, 19034, 19044, 19049, 19092, 19109, 20481, 20484, 20485, 20486, 20489, 20498, 20501,
  8009. 20506, 20513, 20516, 20521, 20544, 20549, 20552, 20561, 20564, 20565, 20566, 20569, 20581, 20584, 20614, 20617,
  8010. 20629, 20632, 20640, 20641, 20646, 20649, 20741, 20744, 20745, 20746, 20753, 20756, 20757, 20758, 20760, 20761,
  8011. 20768, 20773, 20774, 20776, 20778, 20801, 20804, 20805, 20806, 20809, 20816, 20817, 20818, 20820, 20821, 20822,
  8012. 20824, 20825, 20826, 20833, 20836, 20837, 20838, 20841, 20866, 20869, 20881, 20884, 20885, 20886, 20889, 20896,
  8013. 20901, 20906, 20993, 20998, 21010, 21013, 21018, 21025, 21028, 21058, 21061, 21066, 21073, 21076, 21077, 21078,
  8014. 21081, 21090, 21093, 21125, 21136, 21138, 21141, 21145, 21146, 21156, 21508, 21509, 21521, 21524, 21525, 21526,
  8015. 21528, 21529, 21537, 21541, 21544, 21546, 21569, 21572, 21573, 21574, 21577, 21578, 21584, 21585, 21588, 21589,
  8016. 21590, 21592, 21593, 21594, 21601, 21602, 21604, 21605, 21606, 21609, 21632, 21640, 21642, 21649, 21652, 21653,
  8017. 21654, 21657, 21665, 21668, 21669, 21674, 21761, 21762, 21764, 21765, 21766, 21769, 21776, 21777, 21778, 21780,
  8018. 21781, 21782, 21785, 21786, 21793, 21796, 21797, 21798, 21801, 21824, 21825, 21826, 21828, 21829, 21830, 21832,
  8019. 21833, 21840, 21841, 21842, 21844, 21845, 21846, 21848, 21849, 21850, 21856, 21857, 21860, 21861, 21862, 21864,
  8020. 21865, 21866, 21889, 21892, 21893, 21897, 21898, 21904, 21905, 21908, 21909, 21910, 21912, 21913, 21921, 21924,
  8021. 21925, 21926, 21929, 22016, 22017, 22018, 22020, 22022, 22024, 22025, 22033, 22036, 22037, 22040, 22041, 22048,
  8022. 22049, 22050, 22052, 22053, 22054, 22056, 22057, 22081, 22085, 22086, 22088, 22089, 22090, 22096, 22097, 22098,
  8023. 22100, 22101, 22102, 22104, 22105, 22106, 22113, 22116, 22117, 22121, 22146, 22149, 22150, 22152, 22153, 22154,
  8024. 22161, 22165, 22170, 22178, 22181, 22182, 22184, 22185, 22532, 22533, 22534, 22537, 22544, 22549, 22552, 22561,
  8025. 22570, 22597, 22600, 22602, 22609, 22612, 22613, 22614, 22616, 22617, 22624, 22626, 22628, 22629, 22658, 22665,
  8026. 22672, 22674, 22677, 22680, 22689, 22697, 22785, 22786, 22789, 22794, 22801, 22804, 22805, 22806, 22809, 22821,
  8027. 22849, 22852, 22853, 22854, 22857, 22864, 22865, 22866, 22868, 22869, 22870, 22872, 22873, 22874, 22881, 22884,
  8028. 22885, 22886, 22889, 22913, 22917, 22921, 22929, 22932, 22933, 22934, 22936, 22937, 22949, 23044, 23048, 23061,
  8029. 23066, 23072, 23077, 23078, 23081, 23109, 23112, 23113, 23121, 23125, 23126, 23128, 23129, 23138, 23141, 23144,
  8030. 23146, 23169, 23178, 23186, 23189, 23190, 23192, 23194, 23201, 24581, 24596, 24598, 24601, 24613, 24644, 24656,
        24661, 24662, 24664, 24666, 24673, 24676, 24678, 24681, 24705, 24726, 24741, 24833, 24836, 24838, 24841, 24850,
        24853, 24865, 24866, 24870, 24873, 24901, 24905, 24913, 24917, 24918, 24921, 24933, 24934, 24938, 24964, 24970,
        24978, 24981, 24993, 24998, 25001, 25105, 25110, 25113, 25152, 25153, 25158, 25173, 25174, 25176, 25184, 25221,
        25233, 25238, 25253, 25617, 25618, 25621, 25622, 25626, 25633, 25638, 25641, 25664, 25666, 25669, 25672, 25674,
        25681, 25684, 25685, 25686, 25689, 25690, 25696, 25698, 25701, 25732, 25733, 25737, 25744, 25746, 25748, 25749,
        25750, 25752, 25754, 25761, 25764, 25769, 25861, 25864, 25866, 25873, 25877, 25878, 25881, 25924, 25925, 25926,
        25929, 25936, 25937, 25940, 25941, 25942, 25945, 25953, 25956, 25957, 25958, 25961, 25990, 25993, 25994, 26001,
        26005, 26006, 26009, 26010, 26018, 26021, 26022, 26024, 26114, 26121, 26133, 26144, 26150, 26152, 26153, 26176,
        26181, 26184, 26186, 26193, 26196, 26197, 26198, 26200, 26202, 26208, 26213, 26216, 26240, 26242, 26245, 26250,
        26260, 26262, 26264, 26265, 26272, 26276, 26278, 26282, 26646, 26649, 26661, 26689, 26706, 26709, 26714, 26721,
        26729, 26757, 26769, 26776, 26790, 26881, 26884, 26896, 26901, 26913, 26916, 26918, 26921, 26944, 26945, 26949,
        26950, 26952, 26961, 26964, 26965, 26966, 26969, 26976, 26981, 26986, 27010, 27012, 27018, 27029, 27041, 27044,
        27045, 27049, 27153, 27158, 27160, 27201, 27204, 27209, 27216, 27221, 27224, 27226, 27236, 27237, 27241, 27270,
        27284, 27288, 27290, 27302, 32768, 32770, 32776, 32778, 32800, 32802, 32808, 32810, 32837, 32848, 32849, 32852,
        32854, 32857, 32869, 32896, 32898, 32904, 32906, 32917, 32928, 32930, 32936, 32938, 33029, 33041, 33044, 33046,
        33049, 33061, 33089, 33092, 33097, 33104, 33106, 33109, 33110, 33112, 33113, 33124, 33126, 33129, 33157, 33161,
        33172, 33174, 33177, 33189, 33280, 33282, 33288, 33290, 33301, 33312, 33314, 33320, 33322, 33361, 33364, 33369,
        33381, 33408, 33410, 33416, 33418, 33429, 33440, 33442, 33448, 33450, 33812, 33817, 33857, 33860, 33873, 33877,
        33882, 33889, 33892, 33897, 33940, 33945, 34049, 34057, 34066, 34069, 34074, 34086, 34089, 34112, 34113, 34117,
        34120, 34129, 34132, 34133, 34134, 34137, 34138, 34149, 34150, 34152, 34154, 34177, 34180, 34182, 34185, 34192,
        34194, 34197, 34200, 34214, 34321, 34326, 34329, 34341, 34369, 34372, 34377, 34378, 34384, 34389, 34393, 34394,
        34401, 34406, 34410, 34437, 34449, 34458, 34468, 34816, 34818, 34824, 34826, 34837, 34848, 34850, 34856, 34858,
        34881, 34885, 34897, 34900, 34905, 34917, 34921, 34944, 34946, 34952, 34954, 34965, 34976, 34978, 34984, 34986,
        35077, 35078, 35089, 35092, 35094, 35109, 35137, 35140, 35142, 35145, 35152, 35154, 35157, 35162, 35169, 35172,
        35205, 35222, 35225, 35237, 35328, 35330, 35336, 35338, 35349, 35360, 35362, 35368, 35370, 35397, 35409, 35412,
        35414, 35456, 35458, 35464, 35466, 35477, 35488, 35490, 35496, 35498, 36869, 36881, 36886, 36888, 36889, 36901,
        36929, 36934, 36937, 36949, 36952, 36954, 36969, 36970, 36997, 37009, 37012, 37014, 37017, 37029, 37121, 37124,
        37126, 37129, 37136, 37141, 37144, 37146, 37153, 37156, 37158, 37161, 37184, 37189, 37200, 37201, 37204, 37205,
        37206, 37209, 37218, 37221, 37252, 37254, 37266, 37269, 37272, 37281, 37284, 37286, 37289, 37381, 37393, 37396,
        37401, 37413, 37444, 37446, 37449, 37456, 37458, 37461, 37464, 37478, 37481, 37509, 37524, 37526, 37545, 37889,
        37892, 37894, 37904, 37909, 37912, 37926, 37952, 37962, 37969, 37972, 37973, 37974, 37976, 37977, 37984, 37985,
        37986, 37989, 38020, 38022, 38034, 38036, 38037, 38040, 38049, 38057, 38144, 38149, 38152, 38154, 38160, 38161,
        38164, 38165, 38166, 38169, 38177, 38181, 38185, 38186, 38209, 38212, 38213, 38214, 38217, 38224, 38225, 38226,
        38228, 38229, 38230, 38232, 38233, 38234, 38241, 38244, 38245, 38246, 38249, 38273, 38277, 38280, 38289, 38290,
        38292, 38293, 38294, 38297, 38298, 38304, 38306, 38309, 38312, 38314, 38401, 38404, 38416, 38421, 38425, 38432,
        38438, 38441, 38469, 38472, 38473, 38481, 38482, 38485, 38486, 38489, 38501, 38504, 38530, 38532, 38537, 38538,
        38546, 38548, 38549, 38564, 38566, 38569, 38917, 38934, 38937, 38949, 38977, 38982, 38992, 38994, 38997, 38998,
        39002, 39012, 39013, 39045, 39057, 39062, 39065, 39077, 39172, 39174, 39177, 39184, 39186, 39189, 39192, 39194,
        39200, 39201, 39204, 39206, 39232, 39234, 39237, 39240, 39242, 39249, 39252, 39253, 39254, 39257, 39266, 39269,
        39270, 39274, 39297, 39300, 39312, 39314, 39317, 39322, 39329, 39334, 39429, 39445, 39461, 39492, 39494, 39497,
        39504, 39509, 39512, 39521, 39557, 39569, 39572, 39573, 39574, 40960, 40962, 40968, 40970, 40981, 40992, 40994,
        41000, 41002, 41029, 41041, 41044, 41046, 41049, 41088, 41090, 41096, 41098, 41109, 41120, 41122, 41128, 41130,
        41221, 41225, 41233, 41236, 41238, 41241, 41242, 41286, 41289, 41297, 41301, 41304, 41306, 41313, 41316, 41349,
        41360, 41362, 41366, 41369, 41474, 41480, 41482, 41488, 41497, 41506, 41512, 41514, 41541, 41553, 41558, 41561,
        41573, 41600, 41602, 41608, 41610, 41621, 41632, 41634, 41640, 41642, 42009, 42021, 42049, 42052, 42064, 42068,
        42069, 42072, 42074, 42081, 42085, 42086, 42088, 42089, 42117, 42246, 42249, 42256, 42258, 42261, 42264, 42278,
        42281, 42306, 42309, 42321, 42324, 42325, 42326, 42329, 42341, 42346, 42369, 42372, 42373, 42374, 42377, 42386,
        42389, 42392, 42501, 42513, 42518, 42522, 42529, 42533, 42564, 42566, 42570, 42578, 42581, 42582, 42584, 42592,
        42594, 42630, 42640, 42645, 42646, 42649, 42657, 42660, 42662, 43008, 43010, 43016, 43018, 43040, 43042, 43048,
        43050, 43089, 43092, 43094, 43097, 43136, 43138, 43144, 43146, 43157, 43168, 43170, 43176, 43178, 43269, 43284,
        43289, 43297, 43301, 43329, 43344, 43349, 43354, 43361, 43366, 43369, 43408, 43414, 43520, 43522, 43528, 43530,
        43552, 43554, 43560, 43562, 43601, 43604, 43606, 43648, 43650, 43656, 43658, 43669, 43680, 43682, 43688, 43690,
    };
    static const uint16_t kgrid_2bit_1024[1024] = {
        0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
        73, 80, 82, 85, 88, 97, 100, 102, 105, 128, 130, 133, 136, 145, 148, 160,
        165, 170, 257, 260, 262, 265, 272, 274, 277, 280, 289, 292, 320, 322, 325, 328,
        337, 340, 342, 345, 352, 357, 360, 385, 388, 400, 402, 405, 417, 420, 512, 514,
        517, 520, 529, 532, 544, 554, 577, 580, 582, 585, 592, 597, 640, 645, 650, 660,
        674, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1062, 1065, 1088, 1090, 1093,
        1096, 1098, 1105, 1108, 1110, 1113, 1120, 1122, 1125, 1153, 1156, 1158, 1161, 1168, 1173, 1176,
        1185, 1188, 1280, 1282, 1285, 1288, 1290, 1297, 1300, 1302, 1305, 1312, 1317, 1320, 1345, 1348,
        1350, 1353, 1360, 1362, 1365, 1368, 1377, 1380, 1408, 1410, 1413, 1416, 1425, 1428, 1440, 1537,
        1540, 1542, 1545, 1552, 1557, 1600, 1605, 1608, 1617, 1620, 1632, 1665, 1668, 1680, 2048, 2050,
        2053, 2056, 2065, 2068, 2070, 2073, 2080, 2085, 2090, 2113, 2116, 2118, 2121, 2128, 2130, 2133,
        2136, 2145, 2148, 2176, 2181, 2196, 2218, 2305, 2308, 2320, 2322, 2325, 2328, 2337, 2368, 2373,
        2376, 2385, 2388, 2400, 2433, 2448, 2560, 2577, 2580, 2594, 2600, 2602, 2640, 2713, 4097, 4100,
        4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4134, 4160, 4162, 4165, 4168, 4177, 4180, 4182,
        4185, 4192, 4194, 4197, 4200, 4225, 4228, 4230, 4240, 4245, 4248, 4257, 4260, 4352, 4354, 4357,
        4360, 4362, 4369, 4372, 4374, 4377, 4384, 4386, 4389, 4392, 4417, 4420, 4422, 4425, 4432, 4434,
        4437, 4440, 4449, 4452, 4480, 4482, 4485, 4488, 4497, 4500, 4609, 4612, 4617, 4624, 4629, 4641,
        4644, 4672, 4677, 4689, 4692, 4737, 4740, 4752, 5120, 5122, 5125, 5128, 5137, 5140, 5142, 5145,
        5152, 5157, 5160, 5185, 5188, 5190, 5193, 5200, 5202, 5205, 5208, 5217, 5220, 5248, 5250, 5253,
        5256, 5265, 5268, 5280, 5377, 5380, 5382, 5385, 5392, 5394, 5397, 5400, 5409, 5412, 5440, 5442,
        5445, 5448, 5457, 5460, 5472, 5505, 5508, 5520, 5632, 5637, 5640, 5649, 5652, 5664, 5697, 5700,
        5712, 5760, 5802, 6145, 6148, 6150, 6153, 6160, 6165, 6168, 6177, 6208, 6210, 6213, 6216, 6225,
        6228, 6240, 6273, 6276, 6400, 6402, 6405, 6408, 6417, 6420, 6432, 6465, 6468, 6480, 6505, 6562,
        6660, 6672, 6720, 6742, 8192, 8194, 8197, 8200, 8209, 8212, 8214, 8217, 8224, 8229, 8234, 8257,
        8260, 8272, 8274, 8277, 8292, 8320, 8330, 8340, 8362, 8449, 8452, 8464, 8466, 8469, 8481, 8512,
        8514, 8517, 8529, 8532, 8544, 8577, 8580, 8592, 8704, 8714, 8738, 8744, 8746, 8772, 8784, 8840,
        8842, 8872, 9217, 9220, 9222, 9225, 9232, 9237, 9240, 9249, 9252, 9280, 9282, 9285, 9288, 9297,
        9300, 9312, 9345, 9348, 9360, 9472, 9477, 9480, 9489, 9492, 9504, 9537, 9540, 9552, 9574, 9600,
        9729, 9732, 9744, 9792, 9817, 10240, 10245, 10257, 10260, 10305, 10308, 10320, 10378, 10410, 10497, 10500,
        10512, 10645, 10762, 10786, 10852, 10888, 10890, 16385, 16388, 16390, 16393, 16400, 16402, 16405, 16408, 16410,
        16417, 16420, 16422, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16470, 16473, 16480, 16482, 16485, 16513,
        16516, 16528, 16533, 16536, 16545, 16548, 16640, 16642, 16645, 16648, 16657, 16660, 16662, 16665, 16672, 16674,
        16677, 16705, 16708, 16710, 16713, 16720, 16722, 16725, 16728, 16737, 16740, 16768, 16770, 16773, 16776, 16785,
        16788, 16800, 16897, 16900, 16912, 16914, 16917, 16920, 16932, 16960, 16965, 16968, 16977, 16980, 16992, 17025,
        17028, 17408, 17410, 17413, 17416, 17418, 17425, 17428, 17430, 17433, 17440, 17442, 17445, 17448, 17473, 17476,
        17478, 17481, 17488, 17490, 17493, 17496, 17505, 17508, 17536, 17538, 17541, 17544, 17553, 17556, 17568, 17665,
        17668, 17670, 17673, 17680, 17682, 17685, 17688, 17697, 17700, 17728, 17730, 17733, 17736, 17745, 17748, 17760,
        17770, 17793, 17796, 17808, 17920, 17922, 17925, 17928, 17937, 17940, 17952, 17985, 17988, 18000, 18048, 18085,
        18433, 18436, 18441, 18448, 18450, 18453, 18456, 18465, 18468, 18496, 18498, 18501, 18504, 18513, 18516, 18528,
        18564, 18576, 18688, 18690, 18693, 18696, 18705, 18708, 18720, 18753, 18756, 18768, 18816, 18838, 18945, 18948,
        18960, 19008, 20480, 20482, 20485, 20488, 20497, 20500, 20502, 20505, 20512, 20514, 20517, 20520, 20545, 20548,
        20550, 20553, 20560, 20562, 20565, 20568, 20577, 20580, 20608, 20610, 20613, 20616, 20625, 20628, 20737, 20740,
        20742, 20745, 20752, 20754, 20757, 20760, 20769, 20772, 20800, 20802, 20805, 20808, 20817, 20820, 20832, 20865,
        20868, 20880, 20992, 20997, 21000, 21009, 21012, 21024, 21057, 21060, 21072, 21097, 21120, 21505, 21508, 21510,
        21513, 21520, 21522, 21525, 21528, 21537, 21540, 21568, 21570, 21573, 21576, 21585, 21588, 21600, 21633, 21636,
        21648, 21760, 21762, 21765, 21768, 21777, 21780, 21792, 21825, 21828, 21840, 21888, 22017, 22020, 22032, 22054,
        22080, 22528, 22530, 22533, 22536, 22545, 22548, 22560, 22593, 22596, 22608, 22618, 22656, 22785, 22788, 22800,
        22848, 23040, 23065, 23173, 23208, 24577, 24580, 24582, 24592, 24594, 24597, 24600, 24609, 24612, 24640, 24645,
        24648, 24657, 24660, 24672, 24708, 24720, 24832, 24834, 24837, 24840, 24849, 24852, 24864, 24897, 24900, 24912,
        24960, 24985, 25092, 25104, 25152, 25174, 25249, 25600, 25605, 25608, 25617, 25620, 25632, 25665, 25668, 25680,
        25728, 25857, 25860, 25872, 25920, 25930, 25960, 26002, 26112, 26260, 26625, 26628, 26640, 26725, 26776, 26880,
        26922, 27202, 27297, 32768, 32770, 32773, 32776, 32785, 32788, 32793, 32800, 32805, 32833, 32836, 32848, 32850,
        32853, 32856, 32865, 32896, 32901, 32913, 32916, 33025, 33028, 33033, 33040, 33042, 33045, 33048, 33057, 33060,
        33088, 33090, 33093, 33096, 33105, 33108, 33153, 33156, 33168, 33193, 33280, 33285, 33290, 33297, 33300, 33345,
        33348, 33360, 33793, 33796, 33798, 33801, 33808, 33810, 33813, 33816, 33825, 33856, 33858, 33861, 33864, 33873,
        33876, 33888, 33921, 33924, 33936, 34048, 34050, 34053, 34056, 34065, 34068, 34080, 34113, 34116, 34128, 34176,
        34186, 34305, 34308, 34320, 34345, 34368, 34816, 34821, 34833, 34836, 34881, 34884, 34896, 34978, 35073, 35076,
        35136, 35173, 35362, 35416, 35418, 35458, 35490, 36865, 36868, 36873, 36880, 36882, 36885, 36888, 36900, 36928,
        36930, 36933, 36936, 36945, 36948, 36960, 36993, 36996, 37008, 37120, 37125, 37137, 37140, 37185, 37188, 37200,
        37210, 37377, 37380, 37392, 37440, 37542, 37888, 37890, 37893, 37896, 37905, 37908, 37920, 37953, 37956, 37968,
        38016, 38038, 38145, 38148, 38160, 38208, 38296, 38305, 38400, 38470, 38500, 38913, 38916, 38928, 38950, 38976,
        39081, 39168, 39241, 39250, 39568, 40960, 40965, 40970, 40980, 40994, 41002, 41025, 41028, 41040, 41122, 41130,
        41280, 41317, 41474, 41482, 41506, 41512, 41514, 41602, 41608, 41610, 41640, 41985, 41988, 42000, 42048, 42121,
        42148, 42240, 42265, 42577, 43018, 43048, 43170, 43348, 43398, 43528, 43530, 43552, 43554, 43560, 43656, 43690,
    };
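    // Each kgrid entry packs one 8D grid point as 8 x 2-bit indices: lane i holds
    // l = (entry >> 2*i) & 0x3, which the unpacking loop below turns into the odd
    // coordinate 2*l + 1 in {1, 3, 5, 7}. E.g. entry 5 (binary 0101) unpacks to
    // the point (3, 3, 1, 1, 1, 1, 1, 1).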
    const int kmap_size = 43692;
    //const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2;
    const int nwant = type == GGML_TYPE_IQ1_S ? 3 : type == GGML_TYPE_IQ2_S ? 1 : 2;
    const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 :
                             type == GGML_TYPE_IQ2_XS  ? kgrid_2bit_512 :
                             type == GGML_TYPE_IQ1_S   ? kgrid_1bit_2048 : kgrid_2bit_1024;
    uint64_t * kgrid_q2xs;
    int      * kmap_q2xs;
    uint16_t * kneighbors_q2xs;
    //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
    uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
    for (int k = 0; k < grid_size; ++k) {
        int8_t * pos = (int8_t *)(the_grid + k);
        for (int i = 0; i < 8; ++i) {
            int l = (kgrid[k] >> 2*i) & 0x3;
            pos[i] = 2*l + 1;
        }
    }
    kgrid_q2xs = the_grid;
    iq2_data[gindex].grid = the_grid;
    kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
    iq2_data[gindex].map = kmap_q2xs;
    for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
    uint64_t aux64;
    uint8_t * aux8 = (uint8_t *)&aux64;
    for (int i = 0; i < grid_size; ++i) {
        aux64 = kgrid_q2xs[i];
        uint16_t index = 0;
        for (int k = 0; k < 8; ++k) {
            uint16_t q = (aux8[k] - 1)/2;
            index |= (q << 2*k);
        }
        kmap_q2xs[index] = i;
    }
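    // The map above inverts the grid: re-packing a point's 2-bit indices into a
    // 16-bit key gives kmap_q2xs[key] = position of that point in kgrid_q2xs.
    // Keys corresponding to no grid point keep the -1 sentinel and are wired up
    // to neighbour lists below. E.g. the point (3, 3, 1, ..., 1) re-packs to key 5.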
    int8_t pos[8];
    int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
    int num_neighbors = 0, num_not_in_map = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q2xs[i] >= 0) continue;
        ++num_not_in_map;
        for (int k = 0; k < 8; ++k) {
            int l = (i >> 2*k) & 0x3;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
            int d2 = 0;
            for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
        int n = 0; int d2 = dist2[0];
        int nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            ++n;
        }
        num_neighbors += n;
    }
    //printf("%s: %d neighbours in total\n", __func__, num_neighbors);
    kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
    iq2_data[gindex].neighbours = kneighbors_q2xs;
    int counter = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q2xs[i] >= 0) continue;
        for (int k = 0; k < 8; ++k) {
            int l = (i >> 2*k) & 0x3;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
            int d2 = 0;
            for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
        kmap_q2xs[i] = -(counter + 1);
        int d2 = dist2[0];
        uint16_t * start = &kneighbors_q2xs[counter++];
        int n = 0, nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            kneighbors_q2xs[counter++] = dist2[2*j+1];
            ++n;
        }
        *start = n;
    }
    free(dist2);
}
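
// Layout of the neighbour table built in iq2xs_init_impl above: for an off-grid
// key u, kmap_q2xs[u] = -(pos + 1), where pos is the offset of u's entry in
// kneighbors_q2xs. Each entry is a count followed by that many grid indices,
// sorted by increasing distance. A minimal decode sketch (the same idiom is used
// by the quantizers below):
//
//     const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
//     const int n       = neighbours[0]; // number of candidate grid points
//     const int nearest = neighbours[1]; // index of the closest one in kgrid_q2xs
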
void iq2xs_free_impl(enum ggml_type type) {
    GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ2_S);
    const int gindex = iq2_data_index(type);
    if (iq2_data[gindex].grid) {
        free(iq2_data[gindex].grid);       iq2_data[gindex].grid = NULL;
        free(iq2_data[gindex].map);        iq2_data[gindex].map  = NULL;
        free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
    }
}
static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
        const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
    int num_neighbors = neighbours[0];
    GGML_ASSERT(num_neighbors > 0);
    float best_d2 = FLT_MAX;
    int grid_index = -1;
    for (int j = 1; j <= num_neighbors; ++j) {
        const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
        float d2 = 0;
        for (int i = 0; i < 8; ++i) {
            float q = pg[i];
            float diff = scale*q - xval[i];
            d2 += weight[i]*diff*diff;
        }
        if (d2 < best_d2) {
            best_d2 = d2; grid_index = neighbours[j];
        }
    }
    GGML_ASSERT(grid_index >= 0);
    const int8_t * pg = (const int8_t *)(grid + grid_index);
    for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
    return grid_index;
}
static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
    const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS);
    const uint64_t * kgrid_q2xs      = iq2_data[gindex].grid;
    const int      * kmap_q2xs       = iq2_data[gindex].map;
    const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
    GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kgrid_q2xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kmap_q2xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);
    const int kMaxQ = 3;
    const int nbl = n/QK_K;
    block_iq2_xxs * y = vy;
    float scales[QK_K/32];
    float weight[32];
    float xval[32];
    int8_t L[32];
    int8_t Laux[32];
    float  waux[32];
    uint8_t block_signs[4];
    uint32_t q2[2*(QK_K/32)];
    for (int ibl = 0; ibl < nbl; ++ibl) {
        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        memset(q2, 0, QK_K/4);
        float max_scale = 0;
        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = sumx2/QK_K;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            const float * xb = xbl + 32*ib;
            const float * qw = quant_weights + QK_K*ibl + 32*ib;
            for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 4; ++k) {
                int nflip = 0;
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
                    }
                }
                if (nflip%2) {
                    int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
                    for (int i = 1; i < 8; ++i) {
                        float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
                        if (ax < min) {
                            min = ax; imin = i;
                        }
                    }
                    xval[8*k+imin] = -xval[8*k+imin];
                    s ^= (1 << imin);
                }
                block_signs[k] = s & 127;
            }
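            // Only 7 of the 8 sign bits are kept (s & 127): the flip above forces
            // the number of negative entries in each group of 8 to be even, so the
            // 8th sign is recoverable as the parity of the other seven at
            // dequantization time.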
            float max = xval[0];
            for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                memset(L, 0, 32);
                continue;
            }
            float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight);
            float eff_max = scale*kMaxQ;
            float best = 0;
            for (int is = -6; is <= 6; ++is) {
                float id = (2*kMaxQ-1+is*0.1f)/eff_max;
                float this_scale = 1/id;
                for (int k = 0; k < 4; ++k) {
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    memcpy(L, Laux, 32);
                }
            }
            if (scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 4; ++k) {
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 2*i);
                    }
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
                    }
                    const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
                    for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
                // and correspondingly flip quant signs.
                scale = -scale;
                for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
            }
            for (int k = 0; k < 4; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
                int grid_index = kmap_q2xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                q2[2*ib+0] |= (grid_index << 8*k);
                q2[2*ib+1] |= (block_signs[k] << 7*k);
            }
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }
        if (!max_scale) {
            memset(y[ibl].qs, 0, QK_K/4);
            continue;
        }
        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d);
        float id = 1/d;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            q2[2*ib+1] |= ((uint32_t)l << 28);
        }
        memcpy(y[ibl].qs, q2, QK_K/4);
    }
}
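
// Packed layout produced by quantize_row_iq2_xxs_impl above, per 32-weight group:
// q2[2*ib+0] holds four 8-bit indices into kgrid_2bit_256 (one per group of 8
// weights); q2[2*ib+1] holds the four 7-bit sign masks in bits 0..27 and the
// 4-bit group scale in bits 28..31.
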
static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
    const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS);
    const uint64_t * kgrid_q2xs      = iq2_data[gindex].grid;
    const int      * kmap_q2xs       = iq2_data[gindex].map;
    const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
    GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kmap_q2xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kgrid_q2xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);
    const int kMaxQ = 3;
    const int nbl = n/QK_K;
    block_iq2_xs * y = vy;
    float scales[QK_K/16];
    float weight[16];
    float xval[16];
    int8_t L[16];
    int8_t Laux[16];
    float  waux[16];
    bool   is_on_grid[2];
    bool   is_on_grid_aux[2];
    uint8_t block_signs[2];
    uint16_t q2[2*(QK_K/16)];
    for (int ibl = 0; ibl < nbl; ++ibl) {
        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        memset(q2, 0, QK_K/4);
        memset(y[ibl].scales, 0, QK_K/32);
        float max_scale = 0;
        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = sumx2/QK_K;
        for (int ib = 0; ib < QK_K/16; ++ib) {
            const float * xb = xbl + 16*ib;
            const float * qw = quant_weights + QK_K*ibl + 16*ib;
            for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 2; ++k) {
                int nflip = 0;
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
                    }
                }
                if (nflip%2) {
                    int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
                    for (int i = 1; i < 8; ++i) {
                        float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
                        if (ax < min) {
                            min = ax; imin = i;
                        }
                    }
                    xval[8*k+imin] = -xval[8*k+imin];
                    s ^= (1 << imin);
                }
                block_signs[k] = s & 127;
            }
            float max = xval[0];
            for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                memset(L, 0, 16);
                continue;
            }
            float best = 0;
            float scale = max/(2*kMaxQ-1);
            is_on_grid[0] = is_on_grid[1] = true;
            for (int is = -9; is <= 9; ++is) {
                float id = (2*kMaxQ-1+is*0.1f)/max;
                float this_scale = 1/id;
                for (int k = 0; k < 2; ++k) {
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
                    int grid_index = kmap_q2xs[u];
                    is_on_grid_aux[k] = true;
                    if (grid_index < 0) {
                        is_on_grid_aux[k] = false;
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 16; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    for (int i = 0; i < 16; ++i) L[i] = Laux[i];
                    for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
                }
            }
            int n_not_ongrid = 0;
            for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
            if (n_not_ongrid > 0 && scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 2; ++k) {
                    if (is_on_grid[k]) continue;
                    uint16_t u = 0;
                    for (int i = 0; i < 8; ++i) {
                        int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 2*i);
                        L[8*k + i] = l;
                    }
                    int grid_index = kmap_q2xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
                        grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 16; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                scale = -scale;
                for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
            }
            for (int k = 0; k < 2; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
                int grid_index = kmap_q2xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                q2[2*ib+k] = grid_index | (block_signs[k] << 9);
            }
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }
        if (!max_scale) {
            memset(y[ibl].qs, 0, QK_K/4);
            continue;
        }
        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d);
        float id = 1/d;
        for (int ib = 0; ib < QK_K/16; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            if (ib%2 == 0) y[ibl].scales[ib/2] = l;
            else y[ibl].scales[ib/2] |= (l << 4);
        }
        memcpy(y[ibl].qs, q2, QK_K/4);
    }
}
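
// Packed layout produced by quantize_row_iq2_xs_impl above, per 16-weight group:
// each uint16_t in q2 carries a 9-bit index into kgrid_2bit_512 in bits 0..8 and
// the 7-bit sign mask in bits 9..15; the 4-bit group scales go into the separate
// y[ibl].scales nibbles.
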
size_t quantize_iq2_xxs(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq2_xxs);
    }
    return nrow * nblock * sizeof(block_iq2_xxs);
}

size_t quantize_iq2_xs(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq2_xs);
    }
    return nrow * nblock * sizeof(block_iq2_xs);
}
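
// Minimal usage sketch (hypothetical buffer names): the grids must be initialized
// first, e.g. via ggml_quantize_init(), or the asserts in the row quantizers fire:
//
//     ggml_quantize_init(GGML_TYPE_IQ2_XXS);
//     size_t written = quantize_iq2_xxs(src_f32, dst_buf, nrow, n_per_row, imatrix);
//
// where imatrix points to n_per_row importance weights (required for iq2_xxs/xs,
// per the GGML_ASSERT(quant_weights ...) above).
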
//
// ============================================= 3-bit using D4 lattice
//

typedef struct {
    uint32_t * grid;
    int      * map;
    uint16_t * neighbours;
} iq3_entry_t;

static iq3_entry_t iq3_data[2] = {
    {NULL, NULL, NULL},
    {NULL, NULL, NULL},
};

static inline int iq3_data_index(int grid_size) {
    (void)grid_size;
    GGML_ASSERT(grid_size == 256 || grid_size == 512);
    return grid_size == 256 ? 0 : 1;
}

static int iq3_compare_func(const void * left, const void * right) {
    const int * l = (const int *)left;
    const int * r = (const int *)right;
    return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
}
void iq3xs_init_impl(int grid_size) {
    const int gindex = iq3_data_index(grid_size);
    if (iq3_data[gindex].grid) {
        return;
    }
    static const uint16_t kgrid_256[256] = {
        0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74,
        81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159,
        169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321,
        327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531,
        536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664,
        698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978,
        992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105,
        1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228,
        1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553,
        1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722,
        1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063,
        2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389,
        2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746,
        2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153,
        3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610,
        3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992,
    };
    static const uint16_t kgrid_512[512] = {
        0, 1, 2, 5, 7, 8, 9, 10, 12, 14, 16, 17, 21, 27, 32, 34,
        37, 39, 41, 43, 48, 50, 57, 60, 63, 64, 65, 66, 68, 72, 73, 77,
        80, 83, 87, 89, 93, 100, 113, 117, 122, 128, 129, 133, 135, 136, 139, 142,
        145, 149, 152, 156, 162, 165, 167, 169, 171, 184, 187, 195, 201, 205, 208, 210,
        217, 219, 222, 228, 232, 234, 247, 249, 253, 256, 267, 271, 273, 276, 282, 288,
        291, 297, 312, 322, 324, 336, 338, 342, 347, 353, 357, 359, 374, 379, 390, 393,
        395, 409, 426, 441, 448, 450, 452, 464, 466, 470, 475, 488, 492, 512, 513, 514,
        516, 520, 521, 523, 525, 527, 528, 530, 537, 540, 542, 556, 558, 561, 570, 576,
        577, 579, 582, 584, 588, 593, 600, 603, 609, 616, 618, 632, 638, 640, 650, 653,
        655, 656, 660, 666, 672, 675, 685, 688, 698, 705, 708, 711, 712, 715, 721, 727,
        728, 732, 737, 754, 760, 771, 773, 778, 780, 793, 795, 802, 806, 808, 812, 833,
        840, 843, 849, 856, 858, 873, 912, 916, 919, 932, 934, 961, 963, 968, 970, 977,
        989, 993, 1010, 1016, 1024, 1025, 1027, 1029, 1031, 1032, 1034, 1036, 1038, 1041, 1043, 1047,
        1048, 1050, 1057, 1059, 1061, 1064, 1066, 1079, 1080, 1083, 1085, 1088, 1090, 1096, 1099, 1103,
        1106, 1109, 1113, 1116, 1122, 1129, 1153, 1156, 1159, 1169, 1171, 1176, 1183, 1185, 1195, 1199,
        1209, 1212, 1216, 1218, 1221, 1225, 1234, 1236, 1241, 1243, 1250, 1256, 1270, 1281, 1287, 1296,
        1299, 1306, 1309, 1313, 1338, 1341, 1348, 1353, 1362, 1375, 1376, 1387, 1400, 1408, 1410, 1415,
        1425, 1453, 1457, 1477, 1481, 1494, 1496, 1507, 1512, 1538, 1545, 1547, 1549, 1551, 1554, 1561,
        1563, 1565, 1570, 1572, 1575, 1577, 1587, 1593, 1601, 1603, 1605, 1612, 1617, 1619, 1632, 1648,
        1658, 1662, 1664, 1674, 1680, 1690, 1692, 1704, 1729, 1736, 1740, 1745, 1747, 1751, 1752, 1761,
        1763, 1767, 1773, 1787, 1795, 1801, 1806, 1810, 1817, 1834, 1840, 1844, 1857, 1864, 1866, 1877,
        1882, 1892, 1902, 1915, 1934, 1953, 1985, 1987, 2000, 2002, 2013, 2048, 2052, 2058, 2064, 2068,
        2071, 2074, 2081, 2088, 2104, 2114, 2119, 2121, 2123, 2130, 2136, 2141, 2147, 2153, 2157, 2177,
        2179, 2184, 2189, 2193, 2203, 2208, 2223, 2226, 2232, 2244, 2249, 2251, 2256, 2258, 2265, 2269,
        2304, 2306, 2324, 2335, 2336, 2361, 2373, 2375, 2385, 2418, 2443, 2460, 2480, 2504, 2509, 2520,
        2531, 2537, 2562, 2568, 2572, 2578, 2592, 2596, 2599, 2602, 2614, 2620, 2625, 2627, 2629, 2634,
        2641, 2650, 2682, 2688, 2697, 2707, 2712, 2718, 2731, 2754, 2759, 2760, 2775, 2788, 2793, 2805,
        2811, 2817, 2820, 2832, 2842, 2854, 2890, 2902, 2921, 2923, 2978, 3010, 3012, 3026, 3081, 3083,
        3085, 3097, 3099, 3120, 3136, 3152, 3159, 3188, 3210, 3228, 3234, 3245, 3250, 3256, 3264, 3276,
        3281, 3296, 3349, 3363, 3378, 3392, 3395, 3420, 3440, 3461, 3488, 3529, 3531, 3584, 3588, 3591,
        3600, 3602, 3614, 3616, 3628, 3634, 3650, 3657, 3668, 3683, 3685, 3713, 3716, 3720, 3726, 3729,
        3736, 3753, 3778, 3802, 3805, 3819, 3841, 3845, 3851, 3856, 3880, 3922, 3938, 3970, 3993, 4032,
    };
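    // Same packing idea as the iq2 grids, but in 4D with 3-bit lanes: lane i of a
    // kgrid entry holds l = (entry >> 3*i) & 0x7, unpacked below to the odd
    // coordinate 2*l + 1 in {1, 3, ..., 15}. E.g. entry 9 (binary 001001) unpacks
    // to the point (3, 3, 1, 1).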
    const int kmap_size = 4096;
    const int nwant = grid_size == 256 ? 2 : 3;
    const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512;
    uint32_t * kgrid_q3xs;
    int      * kmap_q3xs;
    uint16_t * kneighbors_q3xs;
    //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
    uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
    for (int k = 0; k < grid_size; ++k) {
        int8_t * pos = (int8_t *)(the_grid + k);
        for (int i = 0; i < 4; ++i) {
            int l = (kgrid[k] >> 3*i) & 0x7;
            pos[i] = 2*l + 1;
        }
    }
    kgrid_q3xs = the_grid;
    iq3_data[gindex].grid = the_grid;
    kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
    iq3_data[gindex].map = kmap_q3xs;
    for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
    uint32_t aux32;
    uint8_t * aux8 = (uint8_t *)&aux32;
    for (int i = 0; i < grid_size; ++i) {
        aux32 = kgrid_q3xs[i];
        uint16_t index = 0;
        for (int k = 0; k < 4; ++k) {
            uint16_t q = (aux8[k] - 1)/2;
            index |= (q << 3*k);
        }
        kmap_q3xs[index] = i;
    }
    int8_t pos[4];
    int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
    int num_neighbors = 0, num_not_in_map = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q3xs[i] >= 0) continue;
        ++num_not_in_map;
        for (int k = 0; k < 4; ++k) {
            int l = (i >> 3*k) & 0x7;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
            int d2 = 0;
            for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
        int n = 0; int d2 = dist2[0];
        int nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            ++n;
        }
        num_neighbors += n;
    }
    //printf("%s: %d neighbours in total\n", __func__, num_neighbors);
    kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
    iq3_data[gindex].neighbours = kneighbors_q3xs;
    int counter = 0;
    for (int i = 0; i < kmap_size; ++i) {
        if (kmap_q3xs[i] >= 0) continue;
        for (int k = 0; k < 4; ++k) {
            int l = (i >> 3*k) & 0x7;
            pos[k] = 2*l + 1;
        }
        for (int j = 0; j < grid_size; ++j) {
            const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
            int d2 = 0;
            for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
            dist2[2*j+0] = d2;
            dist2[2*j+1] = j;
        }
        qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
        kmap_q3xs[i] = -(counter + 1);
        int d2 = dist2[0];
        uint16_t * start = &kneighbors_q3xs[counter++];
        int n = 0, nhave = 1;
        for (int j = 0; j < grid_size; ++j) {
            if (dist2[2*j] > d2) {
                if (nhave == nwant) break;
                d2 = dist2[2*j];
                ++nhave;
            }
            kneighbors_q3xs[counter++] = dist2[2*j+1];
            ++n;
        }
        *start = n;
    }
    free(dist2);
}
void iq3xs_free_impl(int grid_size) {
    GGML_ASSERT(grid_size == 256 || grid_size == 512);
    const int gindex = iq3_data_index(grid_size);
    if (iq3_data[gindex].grid) {
        free(iq3_data[gindex].grid);       iq3_data[gindex].grid = NULL;
        free(iq3_data[gindex].map);        iq3_data[gindex].map  = NULL;
        free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
    }
}
static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
        const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
    int num_neighbors = neighbours[0];
    GGML_ASSERT(num_neighbors > 0);
    float best_d2 = FLT_MAX;
    int grid_index = -1;
    for (int j = 1; j <= num_neighbors; ++j) {
        const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
        float d2 = 0;
        for (int i = 0; i < 4; ++i) {
            float q = pg[i];
            float diff = scale*q - xval[i];
            d2 += weight[i]*diff*diff;
        }
        if (d2 < best_d2) {
            best_d2 = d2; grid_index = neighbours[j];
        }
    }
    GGML_ASSERT(grid_index >= 0);
    const int8_t * pg = (const int8_t *)(grid + grid_index);
    for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
    return grid_index;
}
static void quantize_row_iq3_xxs_impl(int grid_size, const float * restrict x, void * restrict vy, int n,
        const float * restrict quant_weights) {
    const int gindex = iq3_data_index(grid_size);
    const uint32_t * kgrid_q3xs      = iq3_data[gindex].grid;
    const int      * kmap_q3xs       = iq3_data[gindex].map;
    const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
    //GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kgrid_q3xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kmap_q3xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);
    const int kMaxQ = 8;
    const int nbl = n/QK_K;
    ggml_fp16_t * dh;
    uint8_t * qs;
    int block_size;
    if (grid_size == 256) {
        block_iq3_xxs * y = vy;
        dh = &y->d;
        qs = y->qs;
        block_size = sizeof(block_iq3_xxs);
    } else {
        block_iq3_s * y = vy;
        dh = &y->d;
        qs = y->qs;
        block_size = sizeof(block_iq3_s);
    }
    int quant_size = block_size - sizeof(ggml_fp16_t);
    float scales[QK_K/32];
    float weight[32];
    float xval[32];
    int8_t L[32];
    int8_t Laux[32];
    float  waux[32];
    bool   is_on_grid[8];
    bool   is_on_grid_aux[8];
    uint8_t block_signs[8];
    uint8_t q3[3*(QK_K/8)+QK_K/32];
    uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);
    uint8_t  * qh = q3 + 3*(QK_K/8);
    for (int ibl = 0; ibl < nbl; ++ibl) {
        dh[0] = GGML_FP32_TO_FP16(0.f);
        memset(q3, 0, 3*QK_K/8+QK_K/32);
        float max_scale = 0;
        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = 2*sumx2/QK_K;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            const float * xb = xbl + 32*ib;
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*ibl + 32*ib;
                for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            } else {
                for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
            }
            for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < 4; ++k) {
                int nflip = 0;
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
                    }
                }
                if (nflip%2) {
                    int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
                    for (int i = 1; i < 8; ++i) {
                        float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
                        if (ax < min) {
                            min = ax; imin = i;
                        }
                    }
                    xval[8*k+imin] = -xval[8*k+imin];
                    s ^= (1 << imin);
                }
                block_signs[k] = s & 127;
            }
            float max = xval[0];
            for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                memset(L, 0, 32);
                continue;
            }
            float best = 0;
            float scale = max/(2*kMaxQ-1);
            for (int is = -15; is <= 15; ++is) {
                float id = (2*kMaxQ-1+is*0.2f)/max;
                float this_scale = 1/id;
                for (int k = 0; k < 8; ++k) {
                    for (int i = 0; i < 4; ++i) {
                        int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
                        Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
                    int grid_index = kmap_q3xs[u];
                    is_on_grid_aux[k] = true;
                    if (grid_index < 0) {
                        is_on_grid_aux[k] = false;
                        const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
                        grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    for (int i = 0; i < 32; ++i) L[i] = Laux[i];
                    for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
                }
            }
            int n_not_ongrid = 0;
            for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
            if (n_not_ongrid > 0 && scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < 8; ++k) {
                    if (is_on_grid[k]) continue;
                    uint16_t u = 0;
                    for (int i = 0; i < 4; ++i) {
                        int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 3*i);
                    }
                    int grid_index = kmap_q3xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
                        grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
                    }
                    const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
                    for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < 32; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
                // and correspondingly flip quant signs.
                scale = -scale;
                for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
            }
            for (int k = 0; k < 8; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
                int grid_index = kmap_q3xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                if (grid_size == 256) {
                    q3[8*ib+k] = grid_index;
                } else {
                    q3[8*ib+k] = grid_index & 255;
                    qh[ib] |= ((grid_index >> 8) << k);
                }
            }
            scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }
        if (!max_scale) {
            memset(qs, 0, quant_size);
            dh += block_size/sizeof(ggml_fp16_t);
            qs += block_size;
            continue;
        }
        float d = max_scale/31;
        dh[0] = GGML_FP32_TO_FP16(d * 1.0125f);  // small improvement via this fudge factor
        float id = 1/d;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            int l = nearest_int(0.5f*(id*scales[ib]-1));
            l = MAX(0, MIN(15, l));
            scales_and_signs[ib] |= ((uint32_t)l << 28);
        }
        memcpy(qs, q3, quant_size);
        dh += block_size/sizeof(ggml_fp16_t);
        qs += block_size;
    }
}
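
// Per 32-weight group in quantize_row_iq3_xxs_impl above, scales_and_signs[ib]
// packs the four 7-bit sign masks in bits 0..27 and the 4-bit group scale in bits
// 28..31; q3 itself stores one 8-bit grid index per group of 4 weights (plus a
// high bit in qh when the 512-point grid is used).
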
size_t quantize_iq3_xxs(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq3_xxs_impl(256, src, qrow, n_per_row, quant_weights);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq3_xxs);
    }
    return nrow * nblock * sizeof(block_iq3_xxs);
}

void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_iq3_xxs * restrict y = vy;
    quantize_row_iq3_xxs_reference(x, y, k);
}

void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int k) {
    assert(k % QK_K == 0);
    quantize_row_iq3_xxs_impl(256, x, y, k, NULL);
}
static void quantize_row_iq3_s_impl(int block_size, const float * restrict x, void * restrict vy, int n,
        const float * restrict quant_weights,
        float   * scales,
        float   * weight,
        float   * xval,
        int8_t  * L,
        int8_t  * Laux,
        float   * waux,
        bool    * is_on_grid,
        bool    * is_on_grid_aux,
        uint8_t * block_signs) {
    const int gindex = iq3_data_index(512);
    const uint32_t * kgrid_q3xs      = iq3_data[gindex].grid;
    const int      * kmap_q3xs       = iq3_data[gindex].map;
    const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
    //GGML_ASSERT(quant_weights   && "missing quantization weights");
    GGML_ASSERT(kgrid_q3xs      && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kmap_q3xs       && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
    GGML_ASSERT(n%QK_K == 0);
    const int kMaxQ = 8;
    const int nbl = n/QK_K;
    block_iq3_s * y = vy;
    const int bs4 = block_size/4;
    const int bs8 = block_size/8;
    for (int ibl = 0; ibl < nbl; ++ibl) {
        memset(&y[ibl], 0, sizeof(block_iq3_s));
        y[ibl].d = GGML_FP32_TO_FP16(0.f);
        uint8_t * qs    = y[ibl].qs;
        uint8_t * qh    = y[ibl].qh;
        uint8_t * signs = y[ibl].signs;
        float max_scale = 0;
        const float * xbl = x + QK_K*ibl;
        float sumx2 = 0;
        for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
        float sigma2 = 2*sumx2/QK_K;
        for (int ib = 0; ib < QK_K/block_size; ++ib) {
            const float * xb = xbl + block_size*ib;
            if (quant_weights) {
                const float * qw = quant_weights + QK_K*ibl + block_size*ib;
                for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
            } else {
                for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
            }
            for (int i = 0; i < block_size; ++i) waux[i] = sqrtf(weight[i]);
            for (int k = 0; k < bs8; ++k) {
                uint8_t s = 0;
                for (int i = 0; i < 8; ++i) {
                    if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
                    else {
                        xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
                    }
                }
                block_signs[k] = s;
            }
            float max = xval[0];
            for (int i = 1; i < block_size; ++i) max = MAX(max, xval[i]);
            if (!max) {
                scales[ib] = 0;
                continue;
            }
            float best = 0;
            float scale = max/(2*kMaxQ-1);
            for (int k = 0; k < bs4; ++k) is_on_grid[k] = false;
            for (int is = -9; is <= 9; ++is) {
                float id = (2*kMaxQ-1+is*0.2f)/max;
                float this_scale = 1/id;
                for (int k = 0; k < bs4; ++k) {
                    for (int i = 0; i < 4; ++i) {
                        int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
                        Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
                    }
                    uint16_t u = 0;
                    for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
                    int grid_index = kmap_q3xs[u];
                    is_on_grid_aux[k] = true;
                    if (grid_index < 0) {
                        is_on_grid_aux[k] = false;
                        const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
                        grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
                    }
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < block_size; ++i) {
                    float w = weight[i];
                    float q = 2*Laux[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
                    scale = sumqx/sumq2; best = scale*sumqx;
                    for (int i = 0; i < block_size; ++i) L[i] = Laux[i];
                    for (int k = 0; k < bs4; ++k) is_on_grid[k] = is_on_grid_aux[k];
                }
            }
            int n_not_ongrid = 0;
            for (int k = 0; k < bs4; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
            if (n_not_ongrid > 0 && scale > 0) {
                float id = 1/scale;
                for (int k = 0; k < bs4; ++k) {
                    //if (is_on_grid[k]) continue;
                    uint16_t u = 0;
                    for (int i = 0; i < 4; ++i) {
                        int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
                        l = MAX(0, MIN(kMaxQ-1, l));
                        u |= (l << 3*i);
                    }
                    int grid_index = kmap_q3xs[u];
                    if (grid_index < 0) {
                        const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
                        grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
                    }
                    const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
                    for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
                }
                float sumqx = 0, sumq2 = 0;
                for (int i = 0; i < block_size; ++i) {
                    float w = weight[i];
                    float q = 2*L[i] + 1;
                    sumqx += w*xval[i]*q;
                    sumq2 += w*q*q;
                }
                if (sumq2 > 0) scale = sumqx/sumq2;
            }
            if (scale < 0) {
                // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
                // and correspondingly flip quant signs.
                scale = -scale;
                for (int k = 0; k < bs8; ++k) block_signs[k] = ~block_signs[k];
            }
            for (int k = 0; k < bs4; ++k) {
                uint16_t u = 0;
                for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
                int grid_index = kmap_q3xs[u];
                if (grid_index < 0) {
                    printf("Oops: found point %u not on grid:", u);
                    for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
                    printf("\n");
                    GGML_ASSERT(false);
                }
                qs[k] = grid_index & 255;
                qh[(ib*bs4+k)/8] |= ((grid_index >> 8) << ((ib*bs4+k)%8));
            }
            qs += bs4;
            for (int k = 0; k < bs8; ++k) signs[k] = block_signs[k];
            signs += bs8;
            GGML_ASSERT(scale >= 0);
            scales[ib] = scale;
            max_scale = MAX(max_scale, scale);
        }
        if (!max_scale) {
            continue;
        }
        float d = max_scale/31;
        y[ibl].d = GGML_FP32_TO_FP16(d * 1.033f);
        float id = 1/d;
        for (int ib = 0; ib < QK_K/block_size; ib += 2) {
            int l1 = nearest_int(0.5f*(id*scales[ib+0]-1));
            l1 = MAX(0, MIN(15, l1));
            int l2 = nearest_int(0.5f*(id*scales[ib+1]-1));
            l2 = MAX(0, MIN(15, l2));
            y[ibl].scales[ib/2] = l1 | (l2 << 4);
        }
    }
}
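
// Index packing used by quantize_row_iq3_s_impl above: the 512-entry grid needs
// 9 bits per group of 4 weights, so qs keeps the low 8 bits and qh collects the
// 9th bits, one bit per group; signs are stored as full 8-bit masks here (no
// even-parity trick as in iq2/iq3_xxs).
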
#define IQ3S_BLOCK_SIZE 32
size_t quantize_iq3_s(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
    GGML_ASSERT(n_per_row%QK_K == 0);
    int nblock = n_per_row/QK_K;
    float scales[QK_K/IQ3S_BLOCK_SIZE];
    float weight[IQ3S_BLOCK_SIZE];
    float xval[IQ3S_BLOCK_SIZE];
    int8_t L[IQ3S_BLOCK_SIZE];
    int8_t Laux[IQ3S_BLOCK_SIZE];
    float  waux[IQ3S_BLOCK_SIZE];
    bool   is_on_grid[IQ3S_BLOCK_SIZE/4];
    bool   is_on_grid_aux[IQ3S_BLOCK_SIZE/4];
    uint8_t block_signs[IQ3S_BLOCK_SIZE/8];
    char * qrow = (char *)dst;
    for (int row = 0; row < nrow; ++row) {
        quantize_row_iq3_s_impl(IQ3S_BLOCK_SIZE, src, qrow, n_per_row, quant_weights,
                scales, weight, xval, L, Laux, waux, is_on_grid, is_on_grid_aux, block_signs);
        src += n_per_row;
        qrow += nblock*sizeof(block_iq3_s);
    }
    return nrow * nblock * sizeof(block_iq3_s);
}

void quantize_row_iq3_s(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK_K == 0);
    block_iq3_s * restrict y = vy;
    quantize_row_iq3_s_reference(x, y, k);
}

void quantize_row_iq3_s_reference(const float * restrict x, block_iq3_s * restrict y, int k) {
    assert(k % QK_K == 0);
    quantize_iq3_s(x, y, 1, k, NULL);
}
  9243. // =================================== 1.5 bpw ===================================================
  9244. static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  9245. const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) {
  9246. int num_neighbors = neighbours[0];
  9247. GGML_ASSERT(num_neighbors > 0);
  9248. float best_score = 0;
  9249. int grid_index = -1;
  9250. for (int j = 1; j <= num_neighbors; ++j) {
  9251. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9252. float sumqx = 0, sumq2 = 0;
  9253. for (int i = 0; i < 8; ++i) {
  9254. float q = (pg[i] - 3)/2;
  9255. float w = weight[i];
  9256. sumqx += w*q*xval[i];
  9257. sumq2 += w*q*q;
  9258. }
  9259. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  9260. *scale = sumqx/sumq2; best_score = *scale * sumqx;
  9261. grid_index = neighbours[j];
  9262. }
  9263. }
  9264. if (grid_index < 0) {
  9265. for (int i = 0; i < ngrid; ++i) {
  9266. const int8_t * grid_i = (const int8_t *)(grid + i);
  9267. float sumqx = 0, sumq2 = 0;
  9268. for (int j = 0; j < 8; ++j) {
  9269. float w = weight[j];
  9270. float q = (grid_i[j] - 3)/2;
  9271. sumqx += w*q*xval[j];
  9272. sumq2 += w*q*q;
  9273. }
  9274. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  9275. *scale = sumqx/sumq2; best_score = *scale*sumqx;
  9276. grid_index = i;
  9277. }
  9278. }
  9279. }
  9280. if (grid_index < 0) {
  9281. printf("Oops, did not find grid point\n");
  9282. printf("Have %d neighbours\n", num_neighbors);
  9283. for (int j = 1; j <= num_neighbors; ++j) {
  9284. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9285. float sumqx = 0, sumq2 = 0;
  9286. for (int i = 0; i < 8; ++i) {
  9287. float q = (pg[i] - 3)/2;
  9288. float w = weight[i];
  9289. sumqx += w*q*xval[i];
  9290. sumq2 += w*q*q;
  9291. }
  9292. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  9293. }
  9294. }
  9295. GGML_ASSERT(grid_index >= 0);
  9296. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  9297. *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result.
  9298. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  9299. const int8_t * pg = (const int8_t *)(grid + grid_index);
  9300. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  9301. return grid_index;
  9302. }
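// Why maximizing sumqx*sumqx/sumq2, as done above, is the same as minimizing the weighted SSD
// (a short derivation using only the quantities already computed in this function):
//
//     E(s)  = sum_i w[i]*(x[i] - s*q[i])^2
//     dE/ds = 0  =>  s* = sumqx/sumq2,  where  sumqx = sum_i w[i]*q[i]*x[i],  sumq2 = sum_i w[i]*q[i]*q[i]
//     E(s*) = sum_i w[i]*x[i]^2 - sumqx*sumqx/sumq2
//
// The first term does not depend on the candidate grid point, so the candidate with the largest
// sumqx*sumqx/sumq2 (tracked above as best_score = *scale * sumqx) has the smallest weighted error.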
  9303. static int iq1_find_best_neighbour2(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  9304. const float * restrict xval, const float * restrict weight, float scale, const float * restrict xg, int8_t * restrict L, int ngrid) {
  9305. int num_neighbors = neighbours[0];
  9306. GGML_ASSERT(num_neighbors > 0);
  9307. float best_score = FLT_MAX;
  9308. int grid_index = -1;
  9309. for (int j = 1; j <= num_neighbors; ++j) {
  9310. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9311. float d2 = 0;
  9312. for (int i = 0; i < 8; ++i) {
  9313. float q = xg[(pg[i] - 1)/2];
  9314. float w = weight[i];
  9315. float diff = scale*q - xval[i];
  9316. d2 += w*diff*diff;
  9317. }
  9318. if (d2 < best_score) {
  9319. best_score = d2;
  9320. grid_index = neighbours[j];
  9321. }
  9322. }
  9323. if (grid_index < 0) {
  9324. for (int i = 0; i < ngrid; ++i) {
  9325. const int8_t * grid_i = (const int8_t *)(grid + i);
  9326. float d2 = 0;
  9327. for (int j = 0; j < 8; ++j) {
  9328. float w = weight[j];
  9329. float q = xg[(grid_i[j] - 1)/2];
9330. float diff = scale*q - xval[j];
  9331. d2 += w*diff*diff;
  9332. }
  9333. if (d2 < best_score) {
  9334. best_score = d2;
  9335. grid_index = i;
  9336. }
  9337. }
  9338. }
  9339. if (grid_index < 0) {
  9340. printf("Oops, did not find grid point\n");
  9341. printf("Have %d neighbours\n", num_neighbors);
  9342. for (int j = 1; j <= num_neighbors; ++j) {
  9343. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  9344. float sumqx = 0, sumq2 = 0;
  9345. for (int i = 0; i < 8; ++i) {
  9346. float q = xg[(pg[i] - 1)/2];
  9347. float w = weight[i];
  9348. sumqx += w*q*xval[i];
  9349. sumq2 += w*q*q;
  9350. }
  9351. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  9352. }
  9353. }
  9354. GGML_ASSERT(grid_index >= 0);
  9355. const int8_t * pg = (const int8_t *)(grid + grid_index);
  9356. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  9357. return grid_index;
  9358. }
  9359. static int iq1_sort_helper(const void * left, const void * right) {
  9360. const float * l = left;
  9361. const float * r = right;
  9362. return *l < *r ? -1 : *l > *r ? 1 : 0;
  9363. }
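// Note on how the comparator above is used below: qsort is called on records of 2*sizeof(float)
// bytes where the first float is the value being sorted and the second slot carries the original
// element index, written through the int* alias idx = (int *)(pairs + 1) (this relies on
// sizeof(int) == sizeof(float)). Only the first float is compared, so after sorting, idx[2*j]
// is the original position of the j-th smallest value, which is what the prefix sums
// sumx/sumw in quantize_row_iq1_s_impl need.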
  9364. #define IQ1S_BLOCK_SIZE 32
  9365. static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  9366. const int gindex = iq2_data_index(GGML_TYPE_IQ1_S);
  9367. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  9368. const int * kmap_q2xs = iq2_data[gindex].map;
  9369. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  9370. GGML_ASSERT(quant_weights && "missing quantization weights");
  9371. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  9372. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  9373. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  9374. GGML_ASSERT(n%QK_K == 0);
  9375. const int nbl = n/QK_K;
  9376. block_iq1_s * y = vy;
  9377. const float x_p[3] = {-1 + IQ1S_DELTA, IQ1S_DELTA, 1 + IQ1S_DELTA};
  9378. const float x_m[3] = {-1 - IQ1S_DELTA, -IQ1S_DELTA, 1 - IQ1S_DELTA};
  9379. float scales[QK_K/IQ1S_BLOCK_SIZE];
  9380. float weight[IQ1S_BLOCK_SIZE];
  9381. int8_t L[IQ1S_BLOCK_SIZE];
  9382. float sumx[IQ1S_BLOCK_SIZE+1];
  9383. float sumw[IQ1S_BLOCK_SIZE+1];
  9384. float pairs[2*IQ1S_BLOCK_SIZE];
  9385. int * idx = (int *)(pairs + 1);
  9386. uint16_t index[IQ1S_BLOCK_SIZE/8];
  9387. int8_t shifts[QK_K/IQ1S_BLOCK_SIZE];
  9388. for (int ibl = 0; ibl < nbl; ++ibl) {
  9389. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  9390. memset(y[ibl].qs, 0, QK_K/8);
  9391. memset(y[ibl].qh, 0, QK_K/16);
  9392. float max_scale = 0;
  9393. const float * xbl = x + QK_K*ibl;
  9394. float sumx2 = 0;
  9395. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  9396. float sigma2 = 2*sumx2/QK_K;
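// sigma2 is twice the mean squared value of the super-block; below, each element's importance
// weight becomes qw[i]*sqrtf(sigma2 + xb[i]*xb[i]), i.e. the imatrix entry boosted for elements
// that are large relative to the overall magnitude of the block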
  9397. for (int ib = 0; ib < QK_K/IQ1S_BLOCK_SIZE; ++ib) {
  9398. const float * xb = xbl + IQ1S_BLOCK_SIZE*ib;
  9399. const float * qw = quant_weights + QK_K*ibl + IQ1S_BLOCK_SIZE*ib;
  9400. for (int i = 0; i < IQ1S_BLOCK_SIZE; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  9401. float max = fabsf(xb[0]);
  9402. for (int i = 1; i < IQ1S_BLOCK_SIZE; ++i) max = MAX(max, fabsf(xb[i]));
  9403. if (!max) {
  9404. scales[ib] = 0;
  9405. memset(L, 1, IQ1S_BLOCK_SIZE);
  9406. continue;
  9407. }
9408. // Here we solve exactly the weighted sum of squared differences (SSD) minimization problem.
9409. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
9410. // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
9411. // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and
9412. // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale
9413. // and score for each possible split.
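// Concretely, for a candidate split (i1, i2) of the sorted values into three groups mapped to
// the quant values (q0, q1, q2) taken from x_p or x_m, the loop below evaluates
//     sumqx = S(i1)*q0 + (S(i2) - S(i1))*q1 + (S(N) - S(i2))*q2
//     sumq2 = W(i1)*q0^2 + (W(i2) - W(i1))*q1^2 + (W(N) - W(i2))*q2^2
// with S = sumx, W = sumw, N = IQ1S_BLOCK_SIZE. The best scale for that split is sumqx/sumq2 and
// its score is sumqx*sumqx/sumq2, so each of the O(N^2) splits is scored in constant time.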
  9414. for (int j = 0; j < IQ1S_BLOCK_SIZE; ++j) {
  9415. pairs[2*j] = xb[j];
  9416. idx[2*j] = j;
  9417. }
  9418. qsort(pairs, IQ1S_BLOCK_SIZE, 2*sizeof(float), iq1_sort_helper);
  9419. {
  9420. sumx[0] = sumw[0] = 0;
  9421. for (int j = 0; j < IQ1S_BLOCK_SIZE; ++j) {
  9422. int i = idx[2*j];
  9423. sumx[j+1] = sumx[j] + weight[i]*xb[i];
  9424. sumw[j+1] = sumw[j] + weight[i];
  9425. }
  9426. }
  9427. float best_score = 0, scale = max;
  9428. int besti1 = -1, besti2 = -1, best_shift = 0;
  9429. for (int i1 = 0; i1 <= IQ1S_BLOCK_SIZE; ++i1) {
  9430. for (int i2 = i1; i2 <= IQ1S_BLOCK_SIZE; ++i2) {
  9431. float sumqx = (sumx[i1] - sumx[0])*x_p[0] + (sumx[i2] - sumx[i1])*x_p[1] + (sumx[IQ1S_BLOCK_SIZE] - sumx[i2])*x_p[2];
  9432. float sumq2 = (sumw[i1] - sumw[0])*x_p[0]*x_p[0] + (sumw[i2] - sumw[i1])*x_p[1]*x_p[1] + (sumw[IQ1S_BLOCK_SIZE] - sumw[i2])*x_p[2]*x_p[2];
  9433. if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  9434. scale = sumqx/sumq2; best_score = scale*sumqx;
  9435. besti1 = i1; besti2 = i2; best_shift = 1;
  9436. }
  9437. sumqx = (sumx[i1] - sumx[0])*x_m[0] + (sumx[i2] - sumx[i1])*x_m[1] + (sumx[IQ1S_BLOCK_SIZE] - sumx[i2])*x_m[2];
  9438. sumq2 = (sumw[i1] - sumw[0])*x_m[0]*x_m[0] + (sumw[i2] - sumw[i1])*x_m[1]*x_m[1] + (sumw[IQ1S_BLOCK_SIZE] - sumw[i2])*x_m[2]*x_m[2];
  9439. if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  9440. scale = sumqx/sumq2; best_score = scale*sumqx;
  9441. besti1 = i1; besti2 = i2; best_shift = -1;
  9442. }
  9443. }
  9444. }
  9445. GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_shift != 0);
  9446. for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
  9447. for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
  9448. for (int j = besti2; j < IQ1S_BLOCK_SIZE; ++j) L[idx[2*j]] = 2;
  9449. if (scale < 0) {
  9450. for (int j = 0; j < IQ1S_BLOCK_SIZE; ++j) L[j] = 2 - L[j];
  9451. scale = -scale; best_shift = -best_shift;
  9452. }
  9453. bool all_on_grid = true;
  9454. const float * xx = best_shift == 1 ? x_p : x_m;
  9455. for (int k = 0; k < IQ1S_BLOCK_SIZE/8; ++k) {
  9456. uint16_t u = 0;
  9457. for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j);
  9458. int grid_index = kmap_q2xs[u];
  9459. if (grid_index < 0) {
  9460. all_on_grid = false;
  9461. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9462. grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S);
  9463. GGML_ASSERT(grid_index >= 0);
  9464. }
  9465. index[k] = grid_index;
  9466. }
  9467. if (!all_on_grid) {
  9468. float sumqx = 0, sumq2 = 0;
  9469. for (int k = 0; k < IQ1S_BLOCK_SIZE/8; ++k) {
  9470. const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]);
  9471. for (int j = 0; j < 8; ++j) {
  9472. float w = weight[8*k + j];
  9473. float q = xx[(pg[j] - 1)/2];
  9474. sumqx += w*q*xb[8*k+j];
  9475. sumq2 += w*q*q;
  9476. }
  9477. }
  9478. if (sumqx > 0 && sumq2 > 0) scale = sumqx/sumq2;
  9479. }
  9480. uint16_t h = 0;
  9481. for (int k = 0; k < IQ1S_BLOCK_SIZE/8; ++k) {
  9482. y[ibl].qs[(IQ1S_BLOCK_SIZE/8)*ib + k] = index[k] & 255;
  9483. h |= (index[k] >> 8) << 3*k;
  9484. }
  9485. y[ibl].qh[ib] = h;
  9486. GGML_ASSERT(scale >= 0);
  9487. scales[ib] = scale;
  9488. shifts[ib] = best_shift;
  9489. max_scale = MAX(max_scale, scale);
  9490. }
  9491. if (!max_scale) {
  9492. memset(y[ibl].qs, 0, QK_K/8);
  9493. continue;
  9494. }
  9495. float d = max_scale/15;
9496. y[ibl].d = GGML_FP32_TO_FP16(d*1.125f); // 1.125f is another fudge factor. Don't ask me why it is needed.
  9497. float id = 1/d;
  9498. for (int ib = 0; ib < QK_K/IQ1S_BLOCK_SIZE; ++ib) {
  9499. int l = nearest_int(0.5f*(id*scales[ib]-1));
  9500. l = MAX(0, MIN(7, l));
  9501. if (shifts[ib] == -1) l |= 8;
  9502. y[ibl].qh[ib] |= (l << 12);
  9503. }
  9504. }
  9505. }
  9506. size_t quantize_iq1_s(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  9507. GGML_ASSERT(n_per_row%QK_K == 0);
  9508. int nblock = n_per_row/QK_K;
  9509. char * qrow = (char *)dst;
  9510. for (int row = 0; row < nrow; ++row) {
  9511. quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights);
  9512. src += n_per_row;
  9513. qrow += nblock*sizeof(block_iq1_s);
  9514. }
  9515. return nrow * nblock * sizeof(block_iq1_s);
  9516. }
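// A minimal usage sketch for this entry point (illustrative only; assumes QK_K == 256 and that
// ggml_quantize_init(GGML_TYPE_IQ1_S) has already been called so the grid/neighbour tables exist;
// IQ1_S requires an importance matrix, see the assert in quantize_row_iq1_s_impl):
//
//     float src[256];                // one row of weights to quantize
//     float imatrix[256];            // per-column importance weights (required)
//     block_iq1_s dst[256/QK_K];     // one block per QK_K values
//     size_t bytes = quantize_iq1_s(src, dst, /*nrow=*/1, /*n_per_row=*/256, imatrix);
//     // bytes == (256/QK_K) * sizeof(block_iq1_s)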
  9517. // ============================ 4-bit non-linear quants
  9518. static inline int best_index_int8(int n, const int8_t * val, float x) {
  9519. if (x <= val[0]) return 0;
  9520. if (x >= val[n-1]) return n-1;
  9521. int ml = 0, mu = n-1;
  9522. while (mu-ml > 1) {
  9523. int mav = (ml+mu)/2;
  9524. if (x < val[mav]) mu = mav; else ml = mav;
  9525. }
  9526. return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
  9527. }
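// best_index_int8 assumes val[] is sorted in ascending order (true for kvalues_iq4nl): the
// bisection narrows down to the two neighbouring table entries that bracket x and returns the
// index of whichever is closer. For example, with a made-up sorted table {-64, -24, 0, 25, 63},
// best_index_int8(5, table, 10.f) returns 2, because 0 is closer to 10 than 25 is.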
  9528. static void quantize_row_iq4_nl_impl(const int super_block_size, const int block_size, const float * restrict x,
  9529. ggml_fp16_t * dh, uint8_t * q4, uint16_t * scales_h, uint8_t * scales_l,
  9530. float * scales, float * weight, uint8_t * L,
  9531. const int8_t * values,
  9532. const float * quant_weights) {
  9533. const int ntry = 7;
  9534. float sigma2 = 0;
  9535. for (int j = 0; j < super_block_size; ++j) sigma2 += x[j]*x[j];
  9536. sigma2 *= 2.f/super_block_size;
  9537. memset(q4, 0, super_block_size/2);
  9538. dh[0] = GGML_FP32_TO_FP16(0.f);
  9539. float max_scale = 0, amax_scale = 0;
  9540. for (int ib = 0; ib < super_block_size/block_size; ++ib) {
  9541. const float * xb = x + ib*block_size;
  9542. if (quant_weights) {
  9543. const float * qw = quant_weights + ib*block_size;
  9544. for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  9545. } else {
  9546. for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
  9547. }
  9548. float amax = 0, max = 0;
  9549. for (int j = 0; j < block_size; ++j) {
  9550. float ax = fabsf(xb[j]);
  9551. if (ax > amax) {
  9552. amax = ax; max = xb[j];
  9553. }
  9554. }
  9555. if (!amax) {
  9556. scales[ib] = 0;
  9557. continue;
  9558. }
  9559. float d = -max/values[0];
  9560. float id = 1/d;
  9561. float sumqx = 0, sumq2 = 0;
  9562. for (int j = 0; j < block_size; ++j) {
  9563. float al = id*xb[j];
  9564. int l = best_index_int8(16, values, al);
  9565. float q = values[l];
  9566. float w = weight[j];
  9567. sumqx += w*q*xb[j];
  9568. sumq2 += w*q*q;
  9569. }
  9570. d = sumqx/sumq2;
  9571. float best = d*sumqx;
  9572. for (int itry = -ntry; itry <= ntry; ++itry) {
  9573. id = (itry + values[0])/max;
  9574. sumqx = sumq2 = 0;
  9575. for (int j = 0; j < block_size; ++j) {
  9576. float al = id*xb[j];
  9577. int l = best_index_int8(16, values, al);
  9578. float q = values[l];
  9579. float w = weight[j];
  9580. sumqx += w*q*xb[j];
  9581. sumq2 += w*q*q;
  9582. }
  9583. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  9584. d = sumqx/sumq2; best = d * sumqx;
  9585. }
  9586. }
  9587. scales[ib] = d;
  9588. float abs_d = fabsf(d);
  9589. if (abs_d > amax_scale) {
  9590. amax_scale = abs_d; max_scale = d;
  9591. }
  9592. }
  9593. if (super_block_size/block_size > 1) {
  9594. int nb = super_block_size/block_size;
  9595. memset(scales_h, 0, ((nb+7)/8)*sizeof(uint16_t));
  9596. float d = -max_scale/32;
  9597. dh[0] = GGML_FP32_TO_FP16(d);
  9598. float id = d ? 1/d : 0.f;
  9599. for (int ib = 0; ib < super_block_size/block_size; ++ib) {
  9600. int l = nearest_int(id*scales[ib]);
  9601. l = MAX(-32, MIN(31, l));
  9602. float dl = d * l;
  9603. float idl = dl ? 1/dl : 0.f;
  9604. uint8_t * Lb = L + ib*block_size;
  9605. const float * xb = x + ib*block_size;
  9606. for (int j = 0; j < block_size; ++j) {
  9607. Lb[j] = best_index_int8(16, values, idl*xb[j]);
  9608. }
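// pack the 6-bit block scale: after biasing by 32 the value is in [0, 63]; the low 4 bits go to
// scales_l (two blocks per byte) and the high 2 bits to scales_h (eight blocks per uint16_t)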
  9609. l += 32;
  9610. uint8_t l_l = l & 0xf;
  9611. uint8_t l_h = l >> 4;
  9612. if (ib%2 == 0) scales_l[ib/2] = l_l;
  9613. else scales_l[ib/2] |= (l_l << 4);
  9614. scales_h[ib/8] |= (l_h << 2*(ib%8));
  9615. }
  9616. } else {
  9617. dh[0] = GGML_FP32_TO_FP16(scales[0]);
  9618. float id = scales[0] ? 1/scales[0] : 0;
  9619. for (int j = 0; j < super_block_size; ++j) {
  9620. L[j] = best_index_int8(16, values, id*x[j]);
  9621. }
  9622. }
  9623. for (int i = 0; i < super_block_size/32; ++i) {
  9624. for (int j = 0; j < 16; ++j) {
  9625. q4[16*i + j] = L[32*i + j] | (L[32*i + 16 + j] << 4);
  9626. }
  9627. }
  9628. }
  9629. size_t quantize_iq4_nl(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  9630. GGML_ASSERT(n_per_row%QK4_NL == 0);
  9631. int nblock = n_per_row/QK4_NL;
  9632. char * qrow = (char *)dst;
  9633. uint8_t L[QK4_NL];
  9634. float weight[QK4_NL];
  9635. uint16_t unused_h;
  9636. uint8_t * unused_l = NULL;
  9637. float scale;
  9638. for (int row = 0; row < nrow; ++row) {
  9639. block_iq4_nl * iq4 = (block_iq4_nl *)qrow;
  9640. for (int ibl = 0; ibl < nblock; ++ibl) {
  9641. const float * qw = quant_weights ? quant_weights + QK4_NL*ibl : NULL;
  9642. quantize_row_iq4_nl_impl(QK4_NL, 32, src + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l,
  9643. &scale, weight, L, kvalues_iq4nl, qw);
  9644. }
  9645. src += n_per_row;
  9646. qrow += nblock*sizeof(block_iq4_nl);
  9647. }
  9648. return nrow * nblock * sizeof(block_iq4_nl);
  9649. }
  9650. void quantize_row_iq4_nl(const float * restrict x, void * restrict vy, int k) {
  9651. assert(k % QK4_NL == 0);
  9652. block_iq4_nl * restrict y = vy;
  9653. quantize_row_iq4_nl_reference(x, y, k);
  9654. }
  9655. void quantize_row_iq4_nl_reference(const float * restrict x, block_iq4_nl * restrict y, int k) {
  9656. assert(k % QK4_NL == 0);
  9657. quantize_iq4_nl(x, y, 1, k, NULL);
  9658. }
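// A minimal usage sketch (illustrative only): unlike IQ1_S, IQ4_NL does not require an importance
// matrix, so quant_weights may be NULL:
//
//     float src[QK4_NL];             // QK4_NL (= 32) values per block
//     block_iq4_nl dst[1];
//     size_t bytes = quantize_iq4_nl(src, dst, /*nrow=*/1, /*n_per_row=*/QK4_NL, /*quant_weights=*/NULL);
//     // equivalently: quantize_row_iq4_nl(src, dst, QK4_NL);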
  9659. size_t quantize_iq4_xs(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  9660. #if QK_K == 64
  9661. return quantize_iq4_nl(src, dst, nrow, n_per_row, quant_weights);
  9662. #else
  9663. GGML_ASSERT(n_per_row%QK_K == 0);
  9664. int nblock = n_per_row/QK_K;
  9665. char * qrow = (char *)dst;
  9666. uint8_t L[QK_K];
  9667. float weight[32];
  9668. float scales[QK_K/32];
  9669. for (int row = 0; row < nrow; ++row) {
  9670. block_iq4_xs * iq4 = (block_iq4_xs *)qrow;
  9671. for (int ibl = 0; ibl < nblock; ++ibl) {
  9672. const float * qw = quant_weights ? quant_weights + QK_K*ibl : NULL;
  9673. quantize_row_iq4_nl_impl(QK_K, 32, src + QK_K*ibl, &iq4[ibl].d, iq4[ibl].qs, &iq4[ibl].scales_h, iq4[ibl].scales_l,
  9674. scales, weight, L, kvalues_iq4nl, qw);
  9675. }
  9676. src += n_per_row;
  9677. qrow += nblock*sizeof(block_iq4_xs);
  9678. }
  9679. return nrow * nblock * sizeof(block_iq4_xs);
  9680. #endif
  9681. }
  9682. void quantize_row_iq4_xs(const float * restrict x, void * restrict vy, int k) {
  9683. assert(k % QK_K == 0);
  9684. block_iq4_xs * restrict y = vy;
  9685. quantize_row_iq4_xs_reference(x, y, k);
  9686. }
  9687. void quantize_row_iq4_xs_reference(const float * restrict x, block_iq4_xs * restrict y, int k) {
  9688. assert(k % QK_K == 0);
  9689. quantize_iq4_xs(x, y, 1, k, NULL);
  9690. }
  9691. // =============================== 2.5625 bpw
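// (Where 2.5625 comes from, assuming QK_K == 256: a block_iq2_s holds a 2-byte fp16 super-block
// scale, QK_K/4 = 64 bytes in qs (32 grid-index low bytes plus 32 sign bytes), QK_K/32 = 8 bytes
// of index high bits in qh, and QK_K/32 = 8 bytes of 4-bit sub-block scales, i.e. 82 bytes for
// 256 weights, or 82*8/256 = 2.5625 bits per weight.)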
  9692. static void quantize_row_iq2_s_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) {
  9693. const int gindex = iq2_data_index(GGML_TYPE_IQ2_S);
  9694. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  9695. const int * kmap_q2xs = iq2_data[gindex].map;
  9696. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  9697. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  9698. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  9699. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  9700. GGML_ASSERT(n%QK_K == 0);
  9701. const int kMaxQ = 3;
  9702. const int nbl = n/QK_K;
  9703. block_iq2_s * y = vy;
  9704. float scales[QK_K/16];
  9705. float weight[16];
  9706. float xval[16];
  9707. int8_t L[16];
  9708. int8_t Laux[16];
  9709. float waux[16];
  9710. bool is_on_grid[2];
  9711. bool is_on_grid_aux[2];
  9712. uint8_t block_signs[2];
  9713. for (int ibl = 0; ibl < nbl; ++ibl) {
  9714. memset(&y[ibl], 0, sizeof(block_iq2_s));
  9715. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  9716. float max_scale = 0;
  9717. const float * xbl = x + QK_K*ibl;
  9718. float sumx2 = 0;
  9719. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  9720. float sigma2 = 2*sumx2/QK_K;
  9721. for (int ib = 0; ib < QK_K/16; ++ib) {
  9722. const float * xb = xbl + 16*ib;
  9723. if (quant_weights) {
  9724. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  9725. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  9726. } else {
  9727. for (int i = 0; i < 16; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i];
  9728. }
  9729. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  9730. for (int k = 0; k < 2; ++k) {
  9731. uint8_t s = 0;
  9732. for (int i = 0; i < 8; ++i) {
  9733. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  9734. else {
  9735. xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
  9736. }
  9737. }
  9738. block_signs[k] = s;
  9739. }
  9740. float max = xval[0];
  9741. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  9742. if (!max) {
  9743. scales[ib] = 0;
  9744. continue;
  9745. }
  9746. float best = 0;
  9747. float scale = max/(2*kMaxQ-1);
  9748. is_on_grid[0] = is_on_grid[1] = true;
  9749. for (int is = -9; is <= 9; ++is) {
  9750. float id = (2*kMaxQ-1+is*0.1f)/max;
  9751. float this_scale = 1/id;
  9752. for (int k = 0; k < 2; ++k) {
  9753. for (int i = 0; i < 8; ++i) {
  9754. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  9755. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  9756. }
  9757. uint16_t u = 0;
  9758. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  9759. int grid_index = kmap_q2xs[u];
  9760. is_on_grid_aux[k] = true;
  9761. if (grid_index < 0) {
  9762. is_on_grid_aux[k] = false;
  9763. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9764. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  9765. }
  9766. }
  9767. float sumqx = 0, sumq2 = 0;
  9768. for (int i = 0; i < 16; ++i) {
  9769. float w = weight[i];
  9770. float q = 2*Laux[i] + 1;
  9771. sumqx += w*xval[i]*q;
  9772. sumq2 += w*q*q;
  9773. }
  9774. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  9775. scale = sumqx/sumq2; best = scale*sumqx;
  9776. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  9777. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  9778. }
  9779. }
  9780. int n_not_ongrid = 0;
  9781. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  9782. if (n_not_ongrid > 0 && scale > 0) {
  9783. float id = 1/scale;
  9784. for (int k = 0; k < 2; ++k) {
  9785. if (is_on_grid[k]) continue;
  9786. uint16_t u = 0;
  9787. for (int i = 0; i < 8; ++i) {
  9788. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  9789. l = MAX(0, MIN(kMaxQ-1, l));
  9790. u |= (l << 2*i);
  9791. L[8*k + i] = l;
  9792. }
  9793. int grid_index = kmap_q2xs[u];
  9794. if (grid_index < 0) {
  9795. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  9796. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  9797. }
  9798. }
  9799. float sumqx = 0, sumq2 = 0;
  9800. for (int i = 0; i < 16; ++i) {
  9801. float w = weight[i];
  9802. float q = 2*L[i] + 1;
  9803. sumqx += w*xval[i]*q;
  9804. sumq2 += w*q*q;
  9805. }
  9806. if (sumq2 > 0) scale = sumqx/sumq2;
  9807. }
  9808. if (scale < 0) {
  9809. scale = -scale;
  9810. for (int k = 0; k < 2; ++k) block_signs[k] = ~block_signs[k];
  9811. }
  9812. for (int k = 0; k < 2; ++k) {
  9813. uint16_t u = 0;
  9814. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  9815. int grid_index = kmap_q2xs[u];
  9816. if (grid_index < 0) {
  9817. printf("Oops: found point %u not on grid:", u);
  9818. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  9819. printf("\n");
  9820. GGML_ASSERT(false);
  9821. }
  9822. const int i8 = 2*ib + k;
  9823. y[ibl].qs[i8] = grid_index & 255;
  9824. y[ibl].qh[i8/4] |= ((grid_index >> 8) << 2*(i8%4));
  9825. y[ibl].qs[QK_K/8 + i8] = block_signs[k];
  9826. }
  9827. GGML_ASSERT(scale >= 0);
  9828. scales[ib] = scale;
  9829. max_scale = MAX(max_scale, scale);
  9830. }
  9831. if (!max_scale) {
  9832. continue;
  9833. }
  9834. float d = max_scale/31;
  9835. y[ibl].d = GGML_FP32_TO_FP16(d * 0.9875f);
  9836. float id = 1/d;
  9837. for (int ib = 0; ib < QK_K/16; ++ib) {
  9838. int l = nearest_int(0.5f*(id*scales[ib]-1));
  9839. l = MAX(0, MIN(15, l));
  9840. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  9841. else y[ibl].scales[ib/2] |= (l << 4);
  9842. }
  9843. }
  9844. }
  9845. size_t quantize_iq2_s(const float * restrict src, void * restrict dst, int nrow, int n_per_row, const float * quant_weights) {
  9846. GGML_ASSERT(n_per_row%QK_K == 0);
  9847. int nblock = n_per_row/QK_K;
  9848. char * qrow = (char *)dst;
  9849. for (int row = 0; row < nrow; ++row) {
  9850. quantize_row_iq2_s_impl(src, qrow, n_per_row, quant_weights);
  9851. src += n_per_row;
  9852. qrow += nblock*sizeof(block_iq2_s);
  9853. }
  9854. return nrow * nblock * sizeof(block_iq2_s);
  9855. }
  9856. void quantize_row_iq2_s_reference(const float * restrict x, block_iq2_s * restrict y, int k) {
  9857. assert(k % QK_K == 0);
  9858. quantize_iq2_s(x, y, 1, k, NULL);
  9859. }
  9860. void quantize_row_iq2_s(const float * restrict x, void * restrict vy, int k) {
  9861. assert(k % QK_K == 0);
  9862. block_iq2_s * restrict y = vy;
  9863. quantize_row_iq2_s_reference(x, y, k);
  9864. }
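// A minimal usage sketch for the IQ2_S entry points (illustrative only; assumes QK_K == 256 and a
// prior ggml_quantize_init(GGML_TYPE_IQ2_S); quant_weights is optional here, unlike for IQ1_S):
//
//     float src[256];
//     block_iq2_s dst[256/QK_K];
//     size_t bytes = quantize_iq2_s(src, dst, /*nrow=*/1, /*n_per_row=*/256, /*quant_weights=*/NULL);
//     // bytes == (256/QK_K) * sizeof(block_iq2_s)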