ggml-quants.c 620 KB

4309143101431114312143131431414315143161431714318143191432014321143221432314324143251432614327143281432914330143311433214333143341433514336143371433814339143401434114342143431434414345143461434714348143491435014351143521435314354143551435614357143581435914360143611436214363143641436514366143671436814369143701437114372143731437414375143761437714378143791438014381143821438314384143851438614387143881438914390143911439214393143941439514396143971439814399144001440114402144031440414405144061440714408144091441014411144121441314414144151441614417144181441914420144211442214423144241442514426144271442814429144301443114432144331443414435144361443714438144391444014441144421444314444144451444614447144481444914450144511445214453144541445514456144571445814459144601446114462144631446414465144661446714468144691447014471144721447314474144751447614477144781447914480144811448214483144841448514486144871448814489144901449114492144931449414495144961449714498144991450014501145021450314504145051450614507145081450914510145111451214513145141451514516145171451814519145201452114522145231452414525145261452714528145291453014531145321453314534145351453614537145381453914540145411454214543145441454514546145471454814549145501455114552145531455414555145561455714558145591456014561145621456314564145651456614567145681456914570145711457214573145741457514576145771457814579145801458114582145831458414585145861458714588145891459014591145921459314594145951459614597145981459914600146011460214603146041460514606146071460814609146101461114612146131461414615146161461714618146191462014621146221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867
  1. #define GGML_COMMON_IMPL_C
  2. #include "ggml-common.h"
  3. #include "ggml-quants.h"
  4. #include "ggml-impl.h"
  5. #define GGML_COMMON_IMPL_C
  6. #include "ggml-common.h"
  7. #include <math.h>
  8. #include <string.h>
  9. #include <assert.h>
  10. #include <float.h>
  11. #include <stdlib.h> // for qsort
  12. #include <stdio.h> // for GGML_ASSERT
  13. #define GROUP_MAX_EPS 1e-15f
  14. #define GROUP_MAX_EPS_IQ3_XXS 1e-8f
  15. #define GROUP_MAX_EPS_IQ2_S 1e-8f
  16. #define GROUP_MAX_EPS_IQ1_M 1e-7f
  17. #define GROUP_MAX_EPS_IQ1_S 1e-12f
  18. #if defined(_MSC_VER)
  19. // disable "possible loss of data" to avoid warnings for hundreds of casts
  20. // we should just be careful :)
  21. #pragma warning(disable: 4244 4267)
  22. #endif
  23. #define UNUSED GGML_UNUSED
  24. // some compilers don't provide _mm256_set_m128i, e.g. gcc 7
  25. #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
  26. #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
  27. // multiply int8_t, add results pairwise twice
  28. static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
  29. // Get absolute values of x vectors
  30. const __m128i ax = _mm_sign_epi8(x, x);
  31. // Sign the values of the y vectors
  32. const __m128i sy = _mm_sign_epi8(y, x);
  33. // Perform multiplication and create 16-bit values
  34. const __m128i dot = _mm_maddubs_epi16(ax, sy);
  35. const __m128i ones = _mm_set1_epi16(1);
  36. return _mm_madd_epi16(ones, dot);
  37. }
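// Note (explanatory, not upstream): _mm_maddubs_epi16 multiplies unsigned bytes by
// signed bytes, which is why the code above feeds it |x| and copies x's sign onto y.
// The products are unchanged, and for int8 inputs the pairwise 16-bit sums stay
// within the int16 range, so no saturation occurs.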
  38. #if __AVX__ || __AVX2__ || __AVX512F__
  39. // horizontally add 8 floats
  40. static inline float hsum_float_8(const __m256 x) {
  41. __m128 res = _mm256_extractf128_ps(x, 1);
  42. res = _mm_add_ps(res, _mm256_castps256_ps128(x));
  43. res = _mm_add_ps(res, _mm_movehl_ps(res, res));
  44. res = _mm_add_ss(res, _mm_movehdup_ps(res));
  45. return _mm_cvtss_f32(res);
  46. }
  47. // horizontally add 8 int32_t
  48. static inline int hsum_i32_8(const __m256i a) {
  49. const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
  50. const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
  51. const __m128i sum64 = _mm_add_epi32(hi64, sum128);
  52. const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
  53. return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
  54. }
  55. // horizontally add 4 int32_t
  56. static inline int hsum_i32_4(const __m128i a) {
  57. const __m128i hi64 = _mm_unpackhi_epi64(a, a);
  58. const __m128i sum64 = _mm_add_epi32(hi64, a);
  59. const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
  60. return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
  61. }
  62. #if defined(__AVX2__) || defined(__AVX512F__)
  63. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  64. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  65. uint32_t x32;
  66. memcpy(&x32, x, sizeof(uint32_t));
  67. const __m256i shuf_mask = _mm256_set_epi64x(
  68. 0x0303030303030303, 0x0202020202020202,
  69. 0x0101010101010101, 0x0000000000000000);
  70. __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
  71. const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
  72. bytes = _mm256_or_si256(bytes, bit_mask);
  73. return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
  74. }
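// How the expansion above works: the 32-bit word is broadcast to every lane and the
// shuffle replicates source byte k into output bytes 8k..8k+7. OR-ing with bit_mask
// sets every bit of output byte n except bit (n % 8), so that byte equals 0xFF
// exactly when bit n of the input was set; the final cmpeq against all-ones turns
// this into the 0x00 / 0xFF mask described in the comment.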
  75. // Unpack 32 4-bit fields into 32 bytes
  76. // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
  77. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  78. {
  79. const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
  80. const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
  81. const __m256i lowMask = _mm256_set1_epi8( 0xF );
  82. return _mm256_and_si256(lowMask, bytes);
  83. }
  84. // add int16_t pairwise and return as float vector
  85. static inline __m256 sum_i16_pairs_float(const __m256i x) {
  86. const __m256i ones = _mm256_set1_epi16(1);
  87. const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
  88. return _mm256_cvtepi32_ps(summed_pairs);
  89. }
  90. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  91. #if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__))
  92. const __m256i zero = _mm256_setzero_si256();
  93. const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
  94. return _mm256_cvtepi32_ps(summed_pairs);
  95. #else
  96. // Perform multiplication and create 16-bit values
  97. const __m256i dot = _mm256_maddubs_epi16(ax, sy);
  98. return sum_i16_pairs_float(dot);
  99. #endif
  100. }
  101. // multiply int8_t, add results pairwise twice and return as float vector
  102. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  103. #if __AVXVNNIINT8__
  104. const __m256i zero = _mm256_setzero_si256();
  105. const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
  106. return _mm256_cvtepi32_ps(summed_pairs);
  107. #else
  108. // Get absolute values of x vectors
  109. const __m256i ax = _mm256_sign_epi8(x, x);
  110. // Sign the values of the y vectors
  111. const __m256i sy = _mm256_sign_epi8(y, x);
  112. return mul_sum_us8_pairs_float(ax, sy);
  113. #endif
  114. }
  115. static inline __m128i packNibbles( __m256i bytes )
  116. {
  117. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  118. #if __AVX512F__
  119. const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
  120. bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
  121. return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
  122. #else
  123. const __m256i lowByte = _mm256_set1_epi16( 0xFF );
  124. __m256i high = _mm256_andnot_si256( lowByte, bytes );
  125. __m256i low = _mm256_and_si256( lowByte, bytes );
  126. high = _mm256_srli_epi16( high, 4 );
  127. bytes = _mm256_or_si256( low, high );
  128. // Compress uint16_t lanes into bytes
  129. __m128i r0 = _mm256_castsi256_si128( bytes );
  130. __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
  131. return _mm_packus_epi16( r0, r1 );
  132. #endif
  133. }
  134. #elif defined(__AVX__)
  135. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  136. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  137. uint32_t x32;
  138. memcpy(&x32, x, sizeof(uint32_t));
  139. const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  140. const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
  141. __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
  142. __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
  143. const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
  144. bytesl = _mm_or_si128(bytesl, bit_mask);
  145. bytesh = _mm_or_si128(bytesh, bit_mask);
  146. bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
  147. bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
  148. return MM256_SET_M128I(bytesh, bytesl);
  149. }
  150. // Unpack 32 4-bit fields into 32 bytes
  151. // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
  152. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  153. {
  154. // Load 16 bytes from memory
  155. __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
  156. __m128i tmph = _mm_srli_epi16(tmpl, 4);
  157. const __m128i lowMask = _mm_set1_epi8(0xF);
  158. tmpl = _mm_and_si128(lowMask, tmpl);
  159. tmph = _mm_and_si128(lowMask, tmph);
  160. return MM256_SET_M128I(tmph, tmpl);
  161. }
  162. // add int16_t pairwise and return as float vector
  163. static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
  164. const __m128i ones = _mm_set1_epi16(1);
  165. const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
  166. const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
  167. const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
  168. return _mm256_cvtepi32_ps(summed_pairs);
  169. }
  170. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  171. const __m128i axl = _mm256_castsi256_si128(ax);
  172. const __m128i axh = _mm256_extractf128_si256(ax, 1);
  173. const __m128i syl = _mm256_castsi256_si128(sy);
  174. const __m128i syh = _mm256_extractf128_si256(sy, 1);
  175. // Perform multiplication and create 16-bit values
  176. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  177. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  178. return sum_i16_pairs_float(doth, dotl);
  179. }
  180. // multiply int8_t, add results pairwise twice and return as float vector
  181. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  182. const __m128i xl = _mm256_castsi256_si128(x);
  183. const __m128i xh = _mm256_extractf128_si256(x, 1);
  184. const __m128i yl = _mm256_castsi256_si128(y);
  185. const __m128i yh = _mm256_extractf128_si256(y, 1);
  186. // Get absolute values of x vectors
  187. const __m128i axl = _mm_sign_epi8(xl, xl);
  188. const __m128i axh = _mm_sign_epi8(xh, xh);
  189. // Sign the values of the y vectors
  190. const __m128i syl = _mm_sign_epi8(yl, xl);
  191. const __m128i syh = _mm_sign_epi8(yh, xh);
  192. // Perform multiplication and create 16-bit values
  193. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  194. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  195. return sum_i16_pairs_float(doth, dotl);
  196. }
  197. static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
  198. {
  199. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  200. const __m128i lowByte = _mm_set1_epi16( 0xFF );
  201. __m128i high = _mm_andnot_si128( lowByte, bytes1 );
  202. __m128i low = _mm_and_si128( lowByte, bytes1 );
  203. high = _mm_srli_epi16( high, 4 );
  204. bytes1 = _mm_or_si128( low, high );
  205. high = _mm_andnot_si128( lowByte, bytes2 );
  206. low = _mm_and_si128( lowByte, bytes2 );
  207. high = _mm_srli_epi16( high, 4 );
  208. bytes2 = _mm_or_si128( low, high );
  209. return _mm_packus_epi16( bytes1, bytes2);
  210. }
  211. #endif
  212. #elif defined(__SSSE3__)
  213. // horizontally add 4x4 floats
  214. static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
  215. __m128 res_0 =_mm_hadd_ps(a, b);
  216. __m128 res_1 =_mm_hadd_ps(c, d);
  217. __m128 res =_mm_hadd_ps(res_0, res_1);
  218. res =_mm_hadd_ps(res, res);
  219. res =_mm_hadd_ps(res, res);
  220. return _mm_cvtss_f32(res);
  221. }
  222. #endif // __AVX__ || __AVX2__ || __AVX512F__
  223. #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
  224. #if defined(__ARM_NEON) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__)
  225. #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
  226. #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
  227. #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
  228. #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
  229. #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
  230. #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
  231. #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
  232. #define B8(c,s ) B7(c,s, c), B7(c,s, s)
  233. // precomputed tables for expanding 8 bits to 8 bytes:
  234. static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
  235. static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
  236. #endif
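// Q4_0 stores blocks of QK4_0 = 32 values as one fp16 scale d plus 16 bytes of
// packed 4-bit indices. d is chosen as max/-8, where max is the signed value with
// the largest magnitude, so that value maps exactly to index 0 (i.e. -8 after the
// offset) and every weight is reconstructed as (q - 8) * d.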
  237. // reference implementation for deterministic creation of model files
  238. void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int64_t k) {
  239. static const int qk = QK4_0;
  240. assert(k % qk == 0);
  241. const int nb = k / qk;
  242. for (int i = 0; i < nb; i++) {
  243. float amax = 0.0f; // absolute max
  244. float max = 0.0f;
  245. for (int j = 0; j < qk; j++) {
  246. const float v = x[i*qk + j];
  247. if (amax < fabsf(v)) {
  248. amax = fabsf(v);
  249. max = v;
  250. }
  251. }
  252. const float d = max / -8;
  253. const float id = d ? 1.0f/d : 0.0f;
  254. y[i].d = GGML_FP32_TO_FP16(d);
  255. for (int j = 0; j < qk/2; ++j) {
  256. const float x0 = x[i*qk + 0 + j]*id;
  257. const float x1 = x[i*qk + qk/2 + j]*id;
  258. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
  259. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
  260. y[i].qs[j] = xi0;
  261. y[i].qs[j] |= xi1 << 4;
  262. }
  263. }
  264. }
  265. void quantize_row_q4_0(const float * restrict x, void * restrict y, int64_t k) {
  266. quantize_row_q4_0_reference(x, y, k);
  267. }
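// Hedged usage sketch (illustrative only, not part of the code below): round-trip
// one row of 64 floats through q4_0 using the functions defined in this file.
// Assumes QK4_0 == 32 as defined in ggml-common.h.
//
//   float src[64] = { /* ... */ }, dst[64];
//   block_q4_0 blocks[64 / QK4_0];
//   quantize_row_q4_0(src, blocks, 64);     // 2 blocks of 32 values each
//   dequantize_row_q4_0(blocks, dst, 64);   // dst ~ src up to quantization error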
  268. void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int64_t k) {
  269. const int qk = QK4_1;
  270. assert(k % qk == 0);
  271. const int nb = k / qk;
  272. for (int i = 0; i < nb; i++) {
  273. float min = FLT_MAX;
  274. float max = -FLT_MAX;
  275. for (int j = 0; j < qk; j++) {
  276. const float v = x[i*qk + j];
  277. if (v < min) min = v;
  278. if (v > max) max = v;
  279. }
  280. const float d = (max - min) / ((1 << 4) - 1);
  281. const float id = d ? 1.0f/d : 0.0f;
  282. y[i].d = GGML_FP32_TO_FP16(d);
  283. y[i].m = GGML_FP32_TO_FP16(min);
  284. for (int j = 0; j < qk/2; ++j) {
  285. const float x0 = (x[i*qk + 0 + j] - min)*id;
  286. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  287. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
  288. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
  289. y[i].qs[j] = xi0;
  290. y[i].qs[j] |= xi1 << 4;
  291. }
  292. }
  293. }
  294. void quantize_row_q4_1(const float * restrict x, void * restrict y, int64_t k) {
  295. quantize_row_q4_1_reference(x, y, k);
  296. }
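// Q5_0 works like Q4_0 but with 5-bit indices: the low 4 bits of each index are
// packed two per byte into qs, while the 32 fifth bits of the block are collected
// into the 32-bit qh field (bit j for element j of the first half, bit j + 16 for
// element j of the second half), which is what dequantize_row_q5_0 reads back.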
  297. void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int64_t k) {
  298. static const int qk = QK5_0;
  299. assert(k % qk == 0);
  300. const int nb = k / qk;
  301. for (int i = 0; i < nb; i++) {
  302. float amax = 0.0f; // absolute max
  303. float max = 0.0f;
  304. for (int j = 0; j < qk; j++) {
  305. const float v = x[i*qk + j];
  306. if (amax < fabsf(v)) {
  307. amax = fabsf(v);
  308. max = v;
  309. }
  310. }
  311. const float d = max / -16;
  312. const float id = d ? 1.0f/d : 0.0f;
  313. y[i].d = GGML_FP32_TO_FP16(d);
  314. uint32_t qh = 0;
  315. for (int j = 0; j < qk/2; ++j) {
  316. const float x0 = x[i*qk + 0 + j]*id;
  317. const float x1 = x[i*qk + qk/2 + j]*id;
  318. const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
  319. const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
  320. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  321. // get the 5th bit and store it in qh at the right position
  322. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  323. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  324. }
  325. memcpy(&y[i].qh, &qh, sizeof(qh));
  326. }
  327. }
  328. void quantize_row_q5_0(const float * restrict x, void * restrict y, int64_t k) {
  329. quantize_row_q5_0_reference(x, y, k);
  330. }
  331. void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int64_t k) {
  332. const int qk = QK5_1;
  333. assert(k % qk == 0);
  334. const int nb = k / qk;
  335. for (int i = 0; i < nb; i++) {
  336. float min = FLT_MAX;
  337. float max = -FLT_MAX;
  338. for (int j = 0; j < qk; j++) {
  339. const float v = x[i*qk + j];
  340. if (v < min) min = v;
  341. if (v > max) max = v;
  342. }
  343. const float d = (max - min) / ((1 << 5) - 1);
  344. const float id = d ? 1.0f/d : 0.0f;
  345. y[i].d = GGML_FP32_TO_FP16(d);
  346. y[i].m = GGML_FP32_TO_FP16(min);
  347. uint32_t qh = 0;
  348. for (int j = 0; j < qk/2; ++j) {
  349. const float x0 = (x[i*qk + 0 + j] - min)*id;
  350. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  351. const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
  352. const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
  353. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  354. // get the 5th bit and store it in qh at the right position
  355. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  356. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  357. }
  358. memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
  359. }
  360. }
  361. void quantize_row_q5_1(const float * restrict x, void * restrict y, int64_t k) {
  362. quantize_row_q5_1_reference(x, y, k);
  363. }
  364. // reference implementation for deterministic creation of model files
  365. void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int64_t k) {
  366. assert(k % QK8_0 == 0);
  367. const int nb = k / QK8_0;
  368. for (int i = 0; i < nb; i++) {
  369. float amax = 0.0f; // absolute max
  370. for (int j = 0; j < QK8_0; j++) {
  371. const float v = x[i*QK8_0 + j];
  372. amax = MAX(amax, fabsf(v));
  373. }
  374. const float d = amax / ((1 << 7) - 1);
  375. const float id = d ? 1.0f/d : 0.0f;
  376. y[i].d = GGML_FP32_TO_FP16(d);
  377. for (int j = 0; j < QK8_0; ++j) {
  378. const float x0 = x[i*QK8_0 + j]*id;
  379. y[i].qs[j] = roundf(x0);
  380. }
  381. }
  382. }
  383. void quantize_row_q8_0(const float * restrict x, void * restrict vy, int64_t k) {
  384. assert(QK8_0 == 32);
  385. assert(k % QK8_0 == 0);
  386. const int nb = k / QK8_0;
  387. block_q8_0 * restrict y = vy;
  388. #if defined(__ARM_NEON)
  389. for (int i = 0; i < nb; i++) {
  390. float32x4_t srcv [8];
  391. float32x4_t asrcv[8];
  392. float32x4_t amaxv[8];
  393. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  394. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  395. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  396. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  397. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  398. const float amax = vmaxvq_f32(amaxv[0]);
  399. const float d = amax / ((1 << 7) - 1);
  400. const float id = d ? 1.0f/d : 0.0f;
  401. y[i].d = GGML_FP32_TO_FP16(d);
  402. for (int j = 0; j < 8; j++) {
  403. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  404. const int32x4_t vi = vcvtnq_s32_f32(v);
  405. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  406. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  407. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  408. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  409. }
  410. }
  411. #elif defined(__wasm_simd128__)
  412. for (int i = 0; i < nb; i++) {
  413. v128_t srcv [8];
  414. v128_t asrcv[8];
  415. v128_t amaxv[8];
  416. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  417. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  418. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  419. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  420. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  421. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  422. wasm_f32x4_extract_lane(amaxv[0], 1)),
  423. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  424. wasm_f32x4_extract_lane(amaxv[0], 3)));
  425. const float d = amax / ((1 << 7) - 1);
  426. const float id = d ? 1.0f/d : 0.0f;
  427. y[i].d = GGML_FP32_TO_FP16(d);
  428. for (int j = 0; j < 8; j++) {
  429. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  430. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  431. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  432. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  433. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  434. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  435. }
  436. }
  437. #elif defined(__AVX2__) || defined(__AVX__)
  438. for (int i = 0; i < nb; i++) {
  439. // Load elements into 4 AVX vectors
  440. __m256 v0 = _mm256_loadu_ps( x );
  441. __m256 v1 = _mm256_loadu_ps( x + 8 );
  442. __m256 v2 = _mm256_loadu_ps( x + 16 );
  443. __m256 v3 = _mm256_loadu_ps( x + 24 );
  444. x += 32;
  445. // Compute max(abs(e)) for the block
  446. const __m256 signBit = _mm256_set1_ps( -0.0f );
  447. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  448. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  449. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  450. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  451. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  452. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  453. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  454. const float maxScalar = _mm_cvtss_f32( max4 );
  455. // Quantize these floats
  456. const float d = maxScalar / 127.f;
  457. y[i].d = GGML_FP32_TO_FP16(d);
  458. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  459. const __m256 mul = _mm256_set1_ps( id );
  460. // Apply the multiplier
  461. v0 = _mm256_mul_ps( v0, mul );
  462. v1 = _mm256_mul_ps( v1, mul );
  463. v2 = _mm256_mul_ps( v2, mul );
  464. v3 = _mm256_mul_ps( v3, mul );
  465. // Round to nearest integer
  466. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  467. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  468. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  469. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  470. // Convert floats to integers
  471. __m256i i0 = _mm256_cvtps_epi32( v0 );
  472. __m256i i1 = _mm256_cvtps_epi32( v1 );
  473. __m256i i2 = _mm256_cvtps_epi32( v2 );
  474. __m256i i3 = _mm256_cvtps_epi32( v3 );
  475. #if defined(__AVX2__)
  476. // Convert int32 to int16
  477. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  478. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  479. // Convert int16 to int8
  480. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  481. // We got our precious signed bytes, but the order is now wrong
  482. // These AVX2 pack instructions process 16-byte pieces independently
  483. // The following permute fixes the order
  484. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  485. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  486. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  487. #else
  488. // Since AVX lacks some of the integer instructions we need,
  489. // we split the registers in half and use their SSE counterparts
  490. __m128i ni0 = _mm256_castsi256_si128( i0 );
  491. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  492. __m128i ni2 = _mm256_castsi256_si128( i1 );
  493. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  494. __m128i ni4 = _mm256_castsi256_si128( i2 );
  495. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  496. __m128i ni6 = _mm256_castsi256_si128( i3 );
  497. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  498. // Convert int32 to int16
  499. ni0 = _mm_packs_epi32( ni0, ni1 );
  500. ni2 = _mm_packs_epi32( ni2, ni3 );
  501. ni4 = _mm_packs_epi32( ni4, ni5 );
  502. ni6 = _mm_packs_epi32( ni6, ni7 );
  503. // Convert int16 to int8
  504. ni0 = _mm_packs_epi16( ni0, ni2 );
  505. ni4 = _mm_packs_epi16( ni4, ni6 );
  506. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  507. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  508. #endif
  509. }
  510. #elif defined(__riscv_v_intrinsic)
  511. size_t vl = __riscv_vsetvl_e32m4(QK8_0);
  512. for (int i = 0; i < nb; i++) {
  513. // load elements
  514. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
  515. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  516. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
  517. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  518. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  519. const float d = amax / ((1 << 7) - 1);
  520. const float id = d ? 1.0f/d : 0.0f;
  521. y[i].d = GGML_FP32_TO_FP16(d);
  522. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  523. // convert to integer
  524. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  525. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  526. // store result
  527. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  528. }
  529. #elif defined(__POWER9_VECTOR__)
  530. for (int i = 0; i < nb; i++) {
  531. vector float srcv [8];
  532. vector float asrcv[8];
  533. vector float amaxv[8];
  534. vector signed int vi[8];
  535. for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j);
  536. for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);
  537. for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
  538. for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
  539. for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);
  540. const float amax = MAX(MAX(vec_extract(amaxv[0], 0),
  541. vec_extract(amaxv[0], 1)),
  542. MAX(vec_extract(amaxv[0], 2),
  543. vec_extract(amaxv[0], 3)));
  544. const float d = amax / ((1 << 7) - 1);
  545. const float id = d ? 1.0f/d : 0.0f;
  546. const vector float vid = vec_splats(id);
  547. y[i].d = GGML_FP32_TO_FP16(d);
  548. for (int j = 0; j < 8; j++) {
  549. const vector float v = vec_round(vec_mul(srcv[j], vid));
  550. vi[j] = vec_cts(v, 0);
  551. }
  552. vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]);
  553. vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]);
  554. }
  555. #else
  556. GGML_UNUSED(nb);
  557. // scalar
  558. quantize_row_q8_0_reference(x, y, k);
  559. #endif
  560. }
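// Q8_1 differs from Q8_0 by an extra fp16 field s = d * sum(quants). Keeping this
// per-block sum lets the Q4_1/Q5_1 dot-product kernels fold the per-block minimum
// into the result without re-summing the int8 activations.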
  561. // reference implementation for deterministic creation of model files
  562. void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int64_t k) {
  563. assert(QK8_1 == 32);
  564. assert(k % QK8_1 == 0);
  565. const int nb = k / QK8_1;
  566. for (int i = 0; i < nb; i++) {
  567. float amax = 0.0f; // absolute max
  568. for (int j = 0; j < QK8_1; j++) {
  569. const float v = x[i*QK8_1 + j];
  570. amax = MAX(amax, fabsf(v));
  571. }
  572. const float d = amax / ((1 << 7) - 1);
  573. const float id = d ? 1.0f/d : 0.0f;
  574. y[i].d = GGML_FP32_TO_FP16(d);
  575. int sum = 0;
  576. for (int j = 0; j < QK8_1/2; ++j) {
  577. const float v0 = x[i*QK8_1 + j]*id;
  578. const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
  579. y[i].qs[ j] = roundf(v0);
  580. y[i].qs[QK8_1/2 + j] = roundf(v1);
  581. sum += y[i].qs[ j];
  582. sum += y[i].qs[QK8_1/2 + j];
  583. }
  584. y[i].s = GGML_FP32_TO_FP16(sum*d);
  585. }
  586. }
  587. void quantize_row_q8_1(const float * restrict x, void * restrict vy, int64_t k) {
  588. assert(k % QK8_1 == 0);
  589. const int nb = k / QK8_1;
  590. block_q8_1 * restrict y = vy;
  591. #if defined(__ARM_NEON)
  592. for (int i = 0; i < nb; i++) {
  593. float32x4_t srcv [8];
  594. float32x4_t asrcv[8];
  595. float32x4_t amaxv[8];
  596. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  597. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  598. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  599. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  600. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  601. const float amax = vmaxvq_f32(amaxv[0]);
  602. const float d = amax / ((1 << 7) - 1);
  603. const float id = d ? 1.0f/d : 0.0f;
  604. y[i].d = GGML_FP32_TO_FP16(d);
  605. int32x4_t accv = vdupq_n_s32(0);
  606. for (int j = 0; j < 8; j++) {
  607. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  608. const int32x4_t vi = vcvtnq_s32_f32(v);
  609. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  610. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  611. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  612. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  613. accv = vaddq_s32(accv, vi);
  614. }
  615. y[i].s = GGML_FP32_TO_FP16(d * vaddvq_s32(accv));
  616. }
  617. #elif defined(__wasm_simd128__)
  618. for (int i = 0; i < nb; i++) {
  619. v128_t srcv [8];
  620. v128_t asrcv[8];
  621. v128_t amaxv[8];
  622. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  623. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  624. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  625. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  626. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  627. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  628. wasm_f32x4_extract_lane(amaxv[0], 1)),
  629. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  630. wasm_f32x4_extract_lane(amaxv[0], 3)));
  631. const float d = amax / ((1 << 7) - 1);
  632. const float id = d ? 1.0f/d : 0.0f;
  633. y[i].d = GGML_FP32_TO_FP16(d);
  634. v128_t accv = wasm_i32x4_splat(0);
  635. for (int j = 0; j < 8; j++) {
  636. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  637. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  638. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  639. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  640. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  641. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  642. accv = wasm_i32x4_add(accv, vi);
  643. }
  644. y[i].s = GGML_FP32_TO_FP16(
  645. d * (wasm_i32x4_extract_lane(accv, 0) +
  646. wasm_i32x4_extract_lane(accv, 1) +
  647. wasm_i32x4_extract_lane(accv, 2) +
  648. wasm_i32x4_extract_lane(accv, 3)));
  649. }
  650. #elif defined(__AVX2__) || defined(__AVX__)
  651. for (int i = 0; i < nb; i++) {
  652. // Load elements into 4 AVX vectors
  653. __m256 v0 = _mm256_loadu_ps( x );
  654. __m256 v1 = _mm256_loadu_ps( x + 8 );
  655. __m256 v2 = _mm256_loadu_ps( x + 16 );
  656. __m256 v3 = _mm256_loadu_ps( x + 24 );
  657. x += 32;
  658. // Compute max(abs(e)) for the block
  659. const __m256 signBit = _mm256_set1_ps( -0.0f );
  660. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  661. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  662. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  663. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  664. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  665. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  666. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  667. const float maxScalar = _mm_cvtss_f32( max4 );
  668. // Quantize these floats
  669. const float d = maxScalar / 127.f;
  670. y[i].d = GGML_FP32_TO_FP16(d);
  671. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  672. const __m256 mul = _mm256_set1_ps( id );
  673. // Apply the multiplier
  674. v0 = _mm256_mul_ps( v0, mul );
  675. v1 = _mm256_mul_ps( v1, mul );
  676. v2 = _mm256_mul_ps( v2, mul );
  677. v3 = _mm256_mul_ps( v3, mul );
  678. // Round to nearest integer
  679. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  680. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  681. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  682. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  683. // Convert floats to integers
  684. __m256i i0 = _mm256_cvtps_epi32( v0 );
  685. __m256i i1 = _mm256_cvtps_epi32( v1 );
  686. __m256i i2 = _mm256_cvtps_epi32( v2 );
  687. __m256i i3 = _mm256_cvtps_epi32( v3 );
  688. #if defined(__AVX2__)
  689. // Compute the sum of the quants and set y[i].s
  690. y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3))));
  691. // Convert int32 to int16
  692. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  693. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  694. // Convert int16 to int8
  695. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  696. // We got our precious signed bytes, but the order is now wrong
  697. // These AVX2 pack instructions process 16-byte pieces independently
  698. // The following permute fixes the order
  699. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  700. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  701. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  702. #else
  703. // Since AVX lacks some of the integer instructions we need,
  704. // we split the registers in half and use their SSE counterparts
  705. __m128i ni0 = _mm256_castsi256_si128( i0 );
  706. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  707. __m128i ni2 = _mm256_castsi256_si128( i1 );
  708. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  709. __m128i ni4 = _mm256_castsi256_si128( i2 );
  710. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  711. __m128i ni6 = _mm256_castsi256_si128( i3 );
  712. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  713. // Compute the sum of the quants and set y[i].s
  714. const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
  715. const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
  716. y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1)));
  717. // Convert int32 to int16
  718. ni0 = _mm_packs_epi32( ni0, ni1 );
  719. ni2 = _mm_packs_epi32( ni2, ni3 );
  720. ni4 = _mm_packs_epi32( ni4, ni5 );
  721. ni6 = _mm_packs_epi32( ni6, ni7 );
  722. // Convert int16 to int8
  723. ni0 = _mm_packs_epi16( ni0, ni2 );
  724. ni4 = _mm_packs_epi16( ni4, ni6 );
  725. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  726. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  727. #endif
  728. }
  729. #elif defined(__riscv_v_intrinsic)
  730. size_t vl = __riscv_vsetvl_e32m4(QK8_1);
  731. for (int i = 0; i < nb; i++) {
  732. // load elements
  733. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
  734. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  735. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl);
  736. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  737. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  738. const float d = amax / ((1 << 7) - 1);
  739. const float id = d ? 1.0f/d : 0.0f;
  740. y[i].d = GGML_FP32_TO_FP16(d);
  741. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  742. // convert to integer
  743. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  744. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  745. // store result
  746. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  747. // compute sum for y[i].s
  748. vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
  749. vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
  750. // set y[i].s
  751. int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
  752. y[i].s = GGML_FP32_TO_FP16(sum*d);
  753. }
  754. #elif defined(__POWER9_VECTOR__)
  755. for (int i = 0; i < nb; i++) {
  756. vector float srcv [8];
  757. vector float asrcv[8];
  758. vector float amaxv[8];
  759. vector signed int vi[8];
  760. for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j);
  761. for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);
  762. for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
  763. for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
  764. for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);
  765. const float amax = MAX(MAX(vec_extract(amaxv[0], 0),
  766. vec_extract(amaxv[0], 1)),
  767. MAX(vec_extract(amaxv[0], 2),
  768. vec_extract(amaxv[0], 3)));
  769. const float d = amax / ((1 << 7) - 1);
  770. const float id = d ? 1.0f/d : 0.0f;
  771. const vector float vid = vec_splats(id);
  772. y[i].d = GGML_FP32_TO_FP16(d);
  773. vector int accv = vec_splats(0);
  774. for (int j = 0; j < 8; j++) {
  775. const vector float v = vec_round(vec_mul(srcv[j], vid));
  776. vi[j] = vec_cts(v, 0);
  777. accv = vec_add(accv, vi[j]);
  778. }
  779. vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]);
  780. vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]);
  781. accv = vec_add(accv, vec_sld(accv, accv, 4));
  782. accv = vec_add(accv, vec_sld(accv, accv, 8));
  783. y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0));
  784. }
  785. #else
  786. GGML_UNUSED(nb);
  787. // scalar
  788. quantize_row_q8_1_reference(x, y, k);
  789. #endif
  790. }
  791. void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int64_t k) {
  792. static const int qk = QK4_0;
  793. assert(k % qk == 0);
  794. const int nb = k / qk;
  795. for (int i = 0; i < nb; i++) {
  796. const float d = GGML_FP16_TO_FP32(x[i].d);
  797. for (int j = 0; j < qk/2; ++j) {
  798. const int x0 = (x[i].qs[j] & 0x0F) - 8;
  799. const int x1 = (x[i].qs[j] >> 4) - 8;
  800. y[i*qk + j + 0 ] = x0*d;
  801. y[i*qk + j + qk/2] = x1*d;
  802. }
  803. }
  804. }
  805. void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int64_t k) {
  806. static const int qk = QK4_1;
  807. assert(k % qk == 0);
  808. const int nb = k / qk;
  809. for (int i = 0; i < nb; i++) {
  810. const float d = GGML_FP16_TO_FP32(x[i].d);
  811. const float m = GGML_FP16_TO_FP32(x[i].m);
  812. for (int j = 0; j < qk/2; ++j) {
  813. const int x0 = (x[i].qs[j] & 0x0F);
  814. const int x1 = (x[i].qs[j] >> 4);
  815. y[i*qk + j + 0 ] = x0*d + m;
  816. y[i*qk + j + qk/2] = x1*d + m;
  817. }
  818. }
  819. }
  820. void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int64_t k) {
  821. static const int qk = QK5_0;
  822. assert(k % qk == 0);
  823. const int nb = k / qk;
  824. for (int i = 0; i < nb; i++) {
  825. const float d = GGML_FP16_TO_FP32(x[i].d);
  826. uint32_t qh;
  827. memcpy(&qh, x[i].qh, sizeof(qh));
  828. for (int j = 0; j < qk/2; ++j) {
  829. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  830. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  831. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  832. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  833. y[i*qk + j + 0 ] = x0*d;
  834. y[i*qk + j + qk/2] = x1*d;
  835. }
  836. }
  837. }
  838. void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int64_t k) {
  839. static const int qk = QK5_1;
  840. assert(k % qk == 0);
  841. const int nb = k / qk;
  842. for (int i = 0; i < nb; i++) {
  843. const float d = GGML_FP16_TO_FP32(x[i].d);
  844. const float m = GGML_FP16_TO_FP32(x[i].m);
  845. uint32_t qh;
  846. memcpy(&qh, x[i].qh, sizeof(qh));
  847. for (int j = 0; j < qk/2; ++j) {
  848. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  849. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  850. const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
  851. const int x1 = (x[i].qs[j] >> 4) | xh_1;
  852. y[i*qk + j + 0 ] = x0*d + m;
  853. y[i*qk + j + qk/2] = x1*d + m;
  854. }
  855. }
  856. }
  857. void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int64_t k) {
  858. static const int qk = QK8_0;
  859. assert(k % qk == 0);
  860. const int nb = k / qk;
  861. for (int i = 0; i < nb; i++) {
  862. const float d = GGML_FP16_TO_FP32(x[i].d);
  863. for (int j = 0; j < qk; ++j) {
  864. y[i*qk + j] = x[i].qs[j]*d;
  865. }
  866. }
  867. }
  868. //
  869. // 2-6 bit quantization in super-blocks
  870. //
  871. //
  872. // ===================== Helper functions
  873. //
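// nearest_int below rounds by exploiting the float representation: adding
// 12582912.0f (= 1.5 * 2^23) pushes any |fval| <= 2^22 into a range where the
// rounded integer sits in the low 23 mantissa bits, offset by 0x00400000.
// Worked example: nearest_int(3.4f) -> 3.4 + 12582912 rounds to 12582915, whose
// mantissa bits are 0x400003; masking and subtracting the offset yields 3.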
  874. static inline int nearest_int(float fval) {
  875. assert(fval <= 4194303.f);
  876. float val = fval + 12582912.f;
  877. int i; memcpy(&i, &val, sizeof(int));
  878. return (i & 0x007fffff) - 0x00400000;
  879. }
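// make_qx_quants below picks a signed scale for n values. For a fixed grid of
// quants l[i], the weighted-least-squares optimum is scale = sum(w*x*l)/sum(w*l*l)
// (sumlx/suml2 in the code); the is-loop then tries slightly different starting
// scales and keeps the candidate that maximizes sumlx^2/suml2, i.e. minimizes the
// weighted squared error.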
  880. static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
  881. const float * restrict qw) {
  882. float max = 0;
  883. float amax = 0;
  884. for (int i = 0; i < n; ++i) {
  885. float ax = fabsf(x[i]);
  886. if (ax > amax) { amax = ax; max = x[i]; }
  887. }
  888. if (amax < GROUP_MAX_EPS) { // all zero
  889. for (int i = 0; i < n; ++i) {
  890. L[i] = 0;
  891. }
  892. return 0.f;
  893. }
  894. float iscale = -nmax / max;
  895. if (rmse_type == 0) {
  896. for (int i = 0; i < n; ++i) {
  897. int l = nearest_int(iscale * x[i]);
  898. L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
  899. }
  900. return 1/iscale;
  901. }
  902. bool return_early = false;
  903. if (rmse_type < 0) {
  904. rmse_type = -rmse_type;
  905. return_early = true;
  906. }
  907. float sumlx = 0;
  908. float suml2 = 0;
  909. #ifdef HAVE_BUGGY_APPLE_LINKER
  910. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  911. for (volatile int i = 0; i < n; ++i) {
  912. #else
  913. for (int i = 0; i < n; ++i) {
  914. #endif
  915. int l = nearest_int(iscale * x[i]);
  916. l = MAX(-nmax, MIN(nmax-1, l));
  917. L[i] = l + nmax;
  918. float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
  919. sumlx += w*x[i]*l;
  920. suml2 += w*l*l;
  921. }
  922. float scale = sumlx/suml2;
  923. if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
  924. float best = scale * sumlx;
  925. for (int is = -9; is <= 9; ++is) {
  926. if (is == 0) {
  927. continue;
  928. }
  929. iscale = -(nmax + 0.1f*is) / max;
  930. sumlx = suml2 = 0;
  931. for (int i = 0; i < n; ++i) {
  932. int l = nearest_int(iscale * x[i]);
  933. l = MAX(-nmax, MIN(nmax-1, l));
  934. float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
  935. sumlx += w*x[i]*l;
  936. suml2 += w*l*l;
  937. }
  938. if (suml2 > 0 && sumlx*sumlx > best*suml2) {
  939. for (int i = 0; i < n; ++i) {
  940. int l = nearest_int(iscale * x[i]);
  941. L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
  942. }
  943. scale = sumlx/suml2; best = scale*sumlx;
  944. }
  945. }
  946. return scale;
  947. }
  948. static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
  949. float max = 0;
  950. float amax = 0;
  951. for (int i = 0; i < n; ++i) {
  952. float ax = fabsf(x[i]);
  953. if (ax > amax) { amax = ax; max = x[i]; }
  954. }
  955. if (amax < GROUP_MAX_EPS) { // all zero
  956. for (int i = 0; i < n; ++i) { L[i] = 0; }
  957. return 0.f;
  958. }
  959. float iscale = -nmax / max;
  960. if (do_rmse) {
  961. float sumlx = 0;
  962. float suml2 = 0;
  963. for (int i = 0; i < n; ++i) {
  964. int l = nearest_int(iscale * x[i]);
  965. l = MAX(-nmax, MIN(nmax-1, l));
  966. L[i] = l;
  967. float w = x[i]*x[i];
  968. sumlx += w*x[i]*l;
  969. suml2 += w*l*l;
  970. }
  971. for (int itry = 0; itry < 5; ++itry) {
  972. int n_changed = 0;
  973. for (int i = 0; i < n; ++i) {
  974. float w = x[i]*x[i];
  975. float slx = sumlx - w*x[i]*L[i];
  976. if (slx > 0) {
  977. float sl2 = suml2 - w*L[i]*L[i];
  978. int new_l = nearest_int(x[i] * sl2 / slx);
  979. new_l = MAX(-nmax, MIN(nmax-1, new_l));
  980. if (new_l != L[i]) {
  981. slx += w*x[i]*new_l;
  982. sl2 += w*new_l*new_l;
  983. if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
  984. L[i] = new_l; sumlx = slx; suml2 = sl2;
  985. ++n_changed;
  986. }
  987. }
  988. }
  989. }
  990. if (!n_changed) {
  991. break;
  992. }
  993. }
  994. for (int i = 0; i < n; ++i) {
  995. L[i] += nmax;
  996. }
  997. return sumlx / suml2;
  998. }
  999. for (int i = 0; i < n; ++i) {
  1000. int l = nearest_int(iscale * x[i]);
  1001. l = MAX(-nmax, MIN(nmax-1, l));
  1002. L[i] = l + nmax;
  1003. }
  1004. return 1/iscale;
  1005. }
  1006. static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
  1007. int ntry, float alpha) {
  1008. float min = x[0];
  1009. float max = x[0];
  1010. for (int i = 1; i < n; ++i) {
  1011. if (x[i] < min) min = x[i];
  1012. if (x[i] > max) max = x[i];
  1013. }
  1014. if (max == min) {
  1015. for (int i = 0; i < n; ++i) L[i] = 0;
  1016. *the_min = 0;
  1017. return 0.f;
  1018. }
  1019. if (min > 0) min = 0;
  1020. float iscale = nmax/(max - min);
  1021. float scale = 1/iscale;
  1022. for (int itry = 0; itry < ntry; ++itry) {
  1023. float sumlx = 0; int suml2 = 0;
  1024. bool did_change = false;
  1025. for (int i = 0; i < n; ++i) {
  1026. int l = nearest_int(iscale*(x[i] - min));
  1027. l = MAX(0, MIN(nmax, l));
  1028. if (l != L[i]) {
  1029. L[i] = l;
  1030. did_change = true;
  1031. }
  1032. sumlx += (x[i] - min)*l;
  1033. suml2 += l*l;
  1034. }
  1035. scale = sumlx/suml2;
  1036. float sum = 0;
  1037. for (int i = 0; i < n; ++i) {
  1038. sum += x[i] - scale*L[i];
  1039. }
  1040. min = alpha*min + (1 - alpha)*sum/n;
  1041. if (min > 0) min = 0;
  1042. iscale = 1/scale;
  1043. if (!did_change) break;
  1044. }
  1045. *the_min = -min;
  1046. return scale;
  1047. }
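// make_qkx2_quants below fits x[i] ~ scale * L[i] + min by weighted least squares
// for several candidate grids. With sum_w, sum_x, sum_l, sum_l2, sum_xl as in the
// code, the normal equations give
//   scale = (sum_w*sum_xl - sum_x*sum_l) / D,  min = (sum_l2*sum_x - sum_l*sum_xl) / D,
//   D = sum_w*sum_l2 - sum_l*sum_l,
// and the candidate with the smallest weighted error (or MAD) is kept.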
  1048. static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
  1049. uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
  1050. float rmin, float rdelta, int nstep, bool use_mad) {
  1051. float min = x[0];
  1052. float max = x[0];
  1053. float sum_w = weights[0];
  1054. float sum_x = sum_w * x[0];
  1055. #ifdef HAVE_BUGGY_APPLE_LINKER
  1056. // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
  1057. for (volatile int i = 1; i < n; ++i) {
  1058. #else
  1059. for (int i = 1; i < n; ++i) {
  1060. #endif
  1061. if (x[i] < min) min = x[i];
  1062. if (x[i] > max) max = x[i];
  1063. float w = weights[i];
  1064. sum_w += w;
  1065. sum_x += w * x[i];
  1066. }
  1067. if (min > 0) min = 0;
  1068. if (max == min) {
  1069. for (int i = 0; i < n; ++i) L[i] = 0;
  1070. *the_min = -min;
  1071. return 0.f;
  1072. }
  1073. float iscale = nmax/(max - min);
  1074. float scale = 1/iscale;
  1075. float best_mad = 0;
  1076. for (int i = 0; i < n; ++i) {
  1077. int l = nearest_int(iscale*(x[i] - min));
  1078. L[i] = MAX(0, MIN(nmax, l));
  1079. float diff = scale * L[i] + min - x[i];
  1080. diff = use_mad ? fabsf(diff) : diff * diff;
  1081. float w = weights[i];
  1082. best_mad += w * diff;
  1083. }
  1084. if (nstep < 1) {
  1085. *the_min = -min;
  1086. return scale;
  1087. }
  1088. for (int is = 0; is <= nstep; ++is) {
  1089. iscale = (rmin + rdelta*is + nmax)/(max - min);
  1090. float sum_l = 0, sum_l2 = 0, sum_xl = 0;
  1091. for (int i = 0; i < n; ++i) {
  1092. int l = nearest_int(iscale*(x[i] - min));
  1093. l = MAX(0, MIN(nmax, l));
  1094. Laux[i] = l;
  1095. float w = weights[i];
  1096. sum_l += w*l;
  1097. sum_l2 += w*l*l;
  1098. sum_xl += w*l*x[i];
  1099. }
  1100. float D = sum_w * sum_l2 - sum_l * sum_l;
  1101. if (D > 0) {
  1102. float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
  1103. float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
  1104. if (this_min > 0) {
  1105. this_min = 0;
  1106. this_scale = sum_xl / sum_l2;
  1107. }
  1108. float mad = 0;
  1109. for (int i = 0; i < n; ++i) {
  1110. float diff = this_scale * Laux[i] + this_min - x[i];
  1111. diff = use_mad ? fabsf(diff) : diff * diff;
  1112. float w = weights[i];
  1113. mad += w * diff;
  1114. }
  1115. if (mad < best_mad) {
  1116. for (int i = 0; i < n; ++i) {
  1117. L[i] = Laux[i];
  1118. }
  1119. best_mad = mad;
  1120. scale = this_scale;
  1121. min = this_min;
  1122. }
  1123. }
  1124. }
  1125. *the_min = -min;
  1126. return scale;
  1127. }
  1128. #if QK_K == 256
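// get_scale_min_k4 unpacks 6-bit scales and mins stored packed into 12 bytes:
// bytes 0-3 hold the low 6 bits of scales 0-3, bytes 4-7 the low 6 bits of mins 0-3,
// bytes 8-11 hold the low 4 bits of scales 4-7 (low nibble) and mins 4-7 (high
// nibble), and the missing top 2 bits come from the high bits of bytes 0-7.
// E.g. for j == 5: scale = low nibble of q[9] plus top 2 bits of q[1];
//                  min   = high nibble of q[9] plus top 2 bits of q[5].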
  1129. static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
  1130. if (j < 4) {
  1131. *d = q[j] & 63; *m = q[j + 4] & 63;
  1132. } else {
  1133. *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
  1134. *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
  1135. }
  1136. }
  1137. #endif
  1138. //========================- 2-bit (de)-quantization
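// Q2_K packs a super-block of QK_K values into 16-value groups. Each group gets a
// 4-bit scale and a 4-bit min (stored in the low/high nibble of scales[]), which are
// in turn scaled by the super-block-wide fp16 factors d and dmin; the 2-bit quants
// are then packed four per byte into qs.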
  1139. void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int64_t k) {
  1140. assert(k % QK_K == 0);
  1141. const int nb = k / QK_K;
  1142. uint8_t L[QK_K];
  1143. uint8_t Laux[16];
  1144. float weights[16];
  1145. float mins[QK_K/16];
  1146. float scales[QK_K/16];
  1147. const float q4scale = 15.f;
  1148. for (int i = 0; i < nb; i++) {
  1149. float max_scale = 0; // since we subtract the min, scales are always positive
  1150. float max_min = 0;
  1151. for (int j = 0; j < QK_K/16; ++j) {
  1152. for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
  1153. scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
  1154. float scale = scales[j];
  1155. if (scale > max_scale) {
  1156. max_scale = scale;
  1157. }
  1158. float min = mins[j];
  1159. if (min > max_min) {
  1160. max_min = min;
  1161. }
  1162. }
  1163. if (max_scale > 0) {
  1164. float iscale = q4scale/max_scale;
  1165. for (int j = 0; j < QK_K/16; ++j) {
  1166. int l = nearest_int(iscale*scales[j]);
  1167. y[i].scales[j] = l;
  1168. }
  1169. y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
  1170. } else {
  1171. for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
  1172. y[i].d = GGML_FP32_TO_FP16(0.f);
  1173. }
  1174. if (max_min > 0) {
  1175. float iscale = q4scale/max_min;
  1176. for (int j = 0; j < QK_K/16; ++j) {
  1177. int l = nearest_int(iscale*mins[j]);
  1178. y[i].scales[j] |= (l << 4);
  1179. }
  1180. y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
  1181. } else {
  1182. y[i].dmin = GGML_FP32_TO_FP16(0.f);
  1183. }
  1184. for (int j = 0; j < QK_K/16; ++j) {
  1185. const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
  1186. if (!d) continue;
  1187. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
  1188. for (int ii = 0; ii < 16; ++ii) {
  1189. int l = nearest_int((x[16*j + ii] + dm)/d);
  1190. l = MAX(0, MIN(3, l));
  1191. L[16*j + ii] = l;
  1192. }
  1193. }
  1194. #if QK_K == 256
  1195. for (int j = 0; j < QK_K; j += 128) {
  1196. for (int l = 0; l < 32; ++l) {
  1197. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1198. }
  1199. }
  1200. #else
  1201. for (int l = 0; l < 16; ++l) {
  1202. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1203. }
  1204. #endif
  1205. x += QK_K;
  1206. }
  1207. }
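// With the packing above a q2_K value reconstructs as
//     x[16*j + l] ~= d*(scales[j] & 0xF)*q - dmin*(scales[j] >> 4),   q in 0..3,
// where d and dmin are the fp16 super-block scale and min; dequantize_row_q2_K()
// below applies exactly this.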
  1208. void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int64_t k) {
  1209. assert(k % QK_K == 0);
  1210. const int nb = k / QK_K;
  1211. for (int i = 0; i < nb; i++) {
  1212. const float d = GGML_FP16_TO_FP32(x[i].d);
  1213. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1214. const uint8_t * q = x[i].qs;
  1215. #if QK_K == 256
  1216. int is = 0;
  1217. float dl, ml;
  1218. for (int n = 0; n < QK_K; n += 128) {
  1219. int shift = 0;
  1220. for (int j = 0; j < 4; ++j) {
  1221. uint8_t sc = x[i].scales[is++];
  1222. dl = d * (sc & 0xF); ml = min * (sc >> 4);
  1223. for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
  1224. sc = x[i].scales[is++];
  1225. dl = d * (sc & 0xF); ml = min * (sc >> 4);
  1226. for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
  1227. shift += 2;
  1228. }
  1229. q += 32;
  1230. }
  1231. #else
  1232. float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
  1233. float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
  1234. float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
  1235. float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
  1236. for (int l = 0; l < 16; ++l) {
  1237. y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
  1238. y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
  1239. y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
  1240. y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
  1241. }
  1242. y += QK_K;
  1243. #endif
  1244. }
  1245. }
  1246. void quantize_row_q2_K(const float * restrict x, void * restrict vy, int64_t k) {
  1247. quantize_row_q2_K_reference(x, vy, k);
  1248. }
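// Minimal round-trip sketch (illustrative only, not part of this file's API):
// quantize k floats with the reference q2_K path and dequantize them again.
// Assumes k is a multiple of QK_K; the buffer names and handling are placeholders.
//
//     const int64_t nb = k / QK_K;
//     block_q2_K * tmp = malloc(nb * sizeof(block_q2_K));   // scratch blocks
//     quantize_row_q2_K(x, tmp, k);                          // x: k input floats
//     dequantize_row_q2_K(tmp, x_hat, k);                    // x_hat: k reconstructed floats
//     free(tmp);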
  1249. static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
  1250. uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
  1251. float rmin, float rdelta, int nstep, bool use_mad) {
  1252. float min = x[0];
  1253. float max = x[0];
  1254. float sum_w = weights ? weights[0] : x[0]*x[0];
  1255. float sum_x = sum_w * x[0];
  1256. #ifdef HAVE_BUGGY_APPLE_LINKER
1257. // use 'volatile' to prevent loop unrolling and work around a bug in Apple ld64 1015.7
  1258. for (volatile int i = 1; i < n; ++i) {
  1259. #else
  1260. for (int i = 1; i < n; ++i) {
  1261. #endif
  1262. if (x[i] < min) min = x[i];
  1263. if (x[i] > max) max = x[i];
  1264. float w = weights ? weights[i] : x[i]*x[i];
  1265. sum_w += w;
  1266. sum_x += w * x[i];
  1267. }
  1268. if (min > 0) {
  1269. min = 0;
  1270. }
  1271. if (max <= min) {
  1272. memset(L, 0, n);
  1273. *the_min = -min;
  1274. return 0.f;
  1275. }
  1276. float iscale = nmax/(max - min);
  1277. float scale = 1/iscale;
  1278. float best_mad = 0;
  1279. for (int i = 0; i < n; ++i) {
  1280. int l = nearest_int(iscale*(x[i] - min));
  1281. L[i] = MAX(0, MIN(nmax, l));
  1282. float diff = scale * L[i] + min - x[i];
  1283. diff = use_mad ? fabsf(diff) : diff*diff;
  1284. float w = weights ? weights[i] : x[i]*x[i];
  1285. best_mad += w * diff;
  1286. }
  1287. if (nstep < 1) {
  1288. *the_min = -min;
  1289. return scale;
  1290. }
  1291. for (int is = 0; is <= nstep; ++is) {
  1292. iscale = (rmin + rdelta*is + nmax)/(max - min);
  1293. float sum_l = 0, sum_l2 = 0, sum_xl = 0;
  1294. for (int i = 0; i < n; ++i) {
  1295. int l = nearest_int(iscale*(x[i] - min));
  1296. l = MAX(0, MIN(nmax, l));
  1297. Laux[i] = l;
  1298. float w = weights ? weights[i] : x[i]*x[i];
  1299. sum_l += w*l;
  1300. sum_l2 += w*l*l;
  1301. sum_xl += w*l*x[i];
  1302. }
  1303. float D = sum_w * sum_l2 - sum_l * sum_l;
  1304. if (D > 0) {
  1305. float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
  1306. float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
  1307. if (this_min > 0) {
  1308. this_min = 0;
  1309. this_scale = sum_xl / sum_l2;
  1310. }
  1311. float mad = 0;
  1312. for (int i = 0; i < n; ++i) {
  1313. float diff = this_scale * Laux[i] + this_min - x[i];
  1314. diff = use_mad ? fabsf(diff) : diff*diff;
  1315. float w = weights ? weights[i] : x[i]*x[i];
  1316. mad += w * diff;
  1317. }
  1318. if (mad < best_mad) {
  1319. for (int i = 0; i < n; ++i) {
  1320. L[i] = Laux[i];
  1321. }
  1322. best_mad = mad;
  1323. scale = this_scale;
  1324. min = this_min;
  1325. }
  1326. }
  1327. }
  1328. *the_min = -min;
  1329. return scale;
  1330. }
  1331. static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
  1332. float max = 0;
  1333. for (int i = 0; i < n; ++i) {
  1334. max = MAX(max, x[i]);
  1335. }
  1336. if (!max) { // all zero
  1337. for (int i = 0; i < n; ++i) { L[i] = 0; }
  1338. return 0.f;
  1339. }
  1340. float iscale = nmax / max;
  1341. for (int i = 0; i < n; ++i) {
  1342. L[i] = nearest_int(iscale * x[i]);
  1343. }
  1344. float scale = 1/iscale;
  1345. float best_mse = 0;
  1346. for (int i = 0; i < n; ++i) {
  1347. float diff = x[i] - scale*L[i];
  1348. float w = quant_weights[i];
  1349. best_mse += w*diff*diff;
  1350. }
  1351. for (int is = -4; is <= 4; ++is) {
  1352. if (is == 0) continue;
  1353. float iscale_is = (0.1f*is + nmax)/max;
  1354. float scale_is = 1/iscale_is;
  1355. float mse = 0;
  1356. for (int i = 0; i < n; ++i) {
  1357. int l = nearest_int(iscale_is*x[i]);
  1358. l = MIN(nmax, l);
  1359. float diff = x[i] - scale_is*l;
  1360. float w = quant_weights[i];
  1361. mse += w*diff*diff;
  1362. }
  1363. if (mse < best_mse) {
  1364. best_mse = mse;
  1365. iscale = iscale_is;
  1366. }
  1367. }
  1368. float sumlx = 0;
  1369. float suml2 = 0;
  1370. for (int i = 0; i < n; ++i) {
  1371. int l = nearest_int(iscale * x[i]);
  1372. l = MIN(nmax, l);
  1373. L[i] = l;
  1374. float w = quant_weights[i];
  1375. sumlx += w*x[i]*l;
  1376. suml2 += w*l*l;
  1377. }
  1378. for (int itry = 0; itry < 5; ++itry) {
  1379. int n_changed = 0;
  1380. for (int i = 0; i < n; ++i) {
  1381. float w = quant_weights[i];
  1382. float slx = sumlx - w*x[i]*L[i];
  1383. float sl2 = suml2 - w*L[i]*L[i];
  1384. if (slx > 0 && sl2 > 0) {
  1385. int new_l = nearest_int(x[i] * sl2 / slx);
  1386. new_l = MIN(nmax, new_l);
  1387. if (new_l != L[i]) {
  1388. slx += w*x[i]*new_l;
  1389. sl2 += w*new_l*new_l;
  1390. if (slx*slx*suml2 > sumlx*sumlx*sl2) {
  1391. L[i] = new_l; sumlx = slx; suml2 = sl2;
  1392. ++n_changed;
  1393. }
  1394. }
  1395. }
  1396. }
  1397. if (!n_changed) {
  1398. break;
  1399. }
  1400. }
  1401. return sumlx/suml2;
  1402. }
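// make_qp_quants() maps n non-negative values onto 0..nmax with a single positive
// scale: it first tries a few candidate scales around nmax/max and keeps the one
// with the lowest weighted MSE, then runs up to 5 rounds of greedy per-element
// refinement. A level change is accepted only if it increases sumlx^2/suml2, which
// is equivalent to reducing the weighted error at the optimal scale sumlx/suml2.
// It is used below to quantize the per-sub-block scales and mins in the
// importance-weighted q2_K, q4_K and q5_K paths.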
  1403. static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
  1404. GGML_ASSERT(quant_weights);
  1405. assert(k % QK_K == 0);
  1406. const int nb = k / QK_K;
  1407. const bool requantize = true;
  1408. uint8_t L[QK_K];
  1409. uint8_t Laux[16];
  1410. float mins[QK_K/16];
  1411. float scales[QK_K/16];
  1412. float sw[QK_K/16];
  1413. float weight[16];
  1414. uint8_t Ls[QK_K/16], Lm[QK_K/16];
  1415. for (int i = 0; i < nb; i++) {
  1416. memset(sw, 0, QK_K/16*sizeof(float));
  1417. float sumx2 = 0;
  1418. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1419. float sigma2 = sumx2/QK_K;
  1420. for (int j = 0; j < QK_K/16; ++j) {
  1421. const float * restrict qw = quant_weights + QK_K * i + 16*j;
  1422. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
  1423. for (int l = 0; l < QK_K/16; ++l) sw[j] += weight[l];
  1424. scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1425. }
  1426. float dm, mm;
  1427. #if QK_K == 64
  1428. float max_scale = 0, max_min = 0;
  1429. for (int j = 0; j < QK_K/16; ++j) {
  1430. max_scale = MAX(max_scale, scales[j]);
  1431. max_min = MAX(max_min, mins[j]);
  1432. }
  1433. dm = max_scale/15;
  1434. mm = max_min/15;
  1435. if (max_scale) {
  1436. float id = 1/dm;
  1437. for (int j = 0; j < QK_K/16; ++j) {
  1438. int l = nearest_int(id*scales[j]);
  1439. Ls[j] = MAX(0, MIN(15, l));
  1440. }
  1441. } else {
  1442. memset(Ls, 0, QK_K/16);
  1443. }
  1444. if (max_min) {
  1445. float id = 1/mm;
  1446. for (int j = 0; j < QK_K/16; ++j) {
  1447. int l = nearest_int(id*mins[j]);
  1448. Lm[j] = MAX(0, MIN(15, l));
  1449. }
  1450. } else {
  1451. memset(Lm, 0, QK_K/16);
  1452. }
  1453. #else
  1454. dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
  1455. mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw);
  1456. #endif
  1457. y[i].d = GGML_FP32_TO_FP16(dm);
  1458. y[i].dmin = GGML_FP32_TO_FP16(mm);
  1459. dm = GGML_FP16_TO_FP32(y[i].d);
  1460. mm = GGML_FP16_TO_FP32(y[i].dmin);
  1461. for (int j = 0; j < QK_K/16; ++j) {
  1462. y[i].scales[j] = Ls[j] | (Lm[j] << 4);
  1463. }
  1464. if (requantize) {
  1465. for (int j = 0; j < QK_K/16; ++j) {
  1466. const float d = dm * (y[i].scales[j] & 0xF);
  1467. if (!d) continue;
  1468. const float m = mm * (y[i].scales[j] >> 4);
  1469. for (int ii = 0; ii < 16; ++ii) {
  1470. int l = nearest_int((x[16*j + ii] + m)/d);
  1471. l = MAX(0, MIN(3, l));
  1472. L[16*j + ii] = l;
  1473. }
  1474. }
  1475. }
  1476. #if QK_K == 256
  1477. for (int j = 0; j < QK_K; j += 128) {
  1478. for (int l = 0; l < 32; ++l) {
  1479. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1480. }
  1481. }
  1482. #else
  1483. for (int l = 0; l < 16; ++l) {
  1484. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1485. }
  1486. #endif
  1487. x += QK_K;
  1488. }
  1489. }
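// In the importance-weighted path above each value enters the fit with weight
// qw[l]*sqrtf(sigma2 + x^2), where qw comes from the caller-supplied quant_weights
// (typically an importance matrix) and sigma2 is the mean square of the super-block,
// so large or important values dominate the choice of scale and min.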
  1490. size_t quantize_q2_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1491. size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
  1492. if (!quant_weights) {
  1493. quantize_row_q2_K_reference(src, dst, (int64_t)nrow*n_per_row);
  1494. }
  1495. else {
  1496. char * qrow = (char *)dst;
  1497. for (int64_t row = 0; row < nrow; ++row) {
  1498. quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
  1499. src += n_per_row;
  1500. qrow += row_size;
  1501. }
  1502. }
  1503. return nrow * row_size;
  1504. }
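// Usage sketch for the quantize_qX_K() entry points (illustrative only; the buffer
// handling and names below are placeholders, not part of this file):
//
//     // src: nrow*n_per_row floats, imatrix: n_per_row importance weights or NULL
//     size_t dst_size = nrow * ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
//     void * dst = malloc(dst_size);
//     size_t written = quantize_q2_K(src, dst, nrow, n_per_row, imatrix);
//     assert(written == dst_size);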
  1505. //========================= 3-bit (de)-quantization
  1506. void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int64_t k) {
  1507. assert(k % QK_K == 0);
  1508. const int nb = k / QK_K;
  1509. int8_t L[QK_K];
  1510. float scales[QK_K / 16];
  1511. for (int i = 0; i < nb; i++) {
  1512. float max_scale = 0;
  1513. float amax = 0;
  1514. for (int j = 0; j < QK_K/16; ++j) {
  1515. scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
  1516. float scale = fabsf(scales[j]);
  1517. if (scale > amax) {
  1518. amax = scale; max_scale = scales[j];
  1519. }
  1520. }
  1521. #if QK_K == 256
  1522. memset(y[i].scales, 0, 12);
  1523. if (max_scale) {
  1524. float iscale = -32.f/max_scale;
  1525. for (int j = 0; j < QK_K/16; ++j) {
  1526. int8_t l = nearest_int(iscale*scales[j]);
  1527. l = MAX(-32, MIN(31, l)) + 32;
  1528. if (j < 8) {
  1529. y[i].scales[j] = l & 0xF;
  1530. } else {
  1531. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1532. }
  1533. l >>= 4;
  1534. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1535. }
  1536. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1537. } else {
  1538. y[i].d = GGML_FP32_TO_FP16(0.f);
  1539. }
  1540. int8_t sc;
  1541. for (int j = 0; j < QK_K/16; ++j) {
  1542. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1543. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1544. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1545. if (!d) {
  1546. continue;
  1547. }
  1548. for (int ii = 0; ii < 16; ++ii) {
  1549. int l = nearest_int(x[16*j + ii]/d);
  1550. l = MAX(-4, MIN(3, l));
  1551. L[16*j + ii] = l + 4;
  1552. }
  1553. }
  1554. #else
  1555. if (max_scale) {
  1556. float iscale = -8.f/max_scale;
  1557. for (int j = 0; j < QK_K/16; j+=2) {
  1558. int l1 = nearest_int(iscale*scales[j]);
  1559. l1 = 8 + MAX(-8, MIN(7, l1));
  1560. int l2 = nearest_int(iscale*scales[j+1]);
  1561. l2 = 8 + MAX(-8, MIN(7, l2));
  1562. y[i].scales[j/2] = l1 | (l2 << 4);
  1563. }
  1564. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1565. } else {
  1566. for (int j = 0; j < QK_K/16; j+=2) {
  1567. y[i].scales[j/2] = 0;
  1568. }
  1569. y[i].d = GGML_FP32_TO_FP16(0.f);
  1570. }
  1571. for (int j = 0; j < QK_K/16; ++j) {
  1572. int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
  1573. float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
  1574. if (!d) {
  1575. continue;
  1576. }
  1577. for (int ii = 0; ii < 16; ++ii) {
  1578. int l = nearest_int(x[16*j + ii]/d);
  1579. l = MAX(-4, MIN(3, l));
  1580. L[16*j + ii] = l + 4;
  1581. }
  1582. }
  1583. #endif
  1584. memset(y[i].hmask, 0, QK_K/8);
1585. // We put the high bit of the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc.
  1586. int m = 0;
  1587. uint8_t hm = 1;
  1588. for (int j = 0; j < QK_K; ++j) {
  1589. if (L[j] > 3) {
  1590. y[i].hmask[m] |= hm;
  1591. L[j] -= 4;
  1592. }
  1593. if (++m == QK_K/8) {
  1594. m = 0; hm <<= 1;
  1595. }
  1596. }
  1597. #if QK_K == 256
  1598. for (int j = 0; j < QK_K; j += 128) {
  1599. for (int l = 0; l < 32; ++l) {
  1600. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1601. }
  1602. }
  1603. #else
  1604. for (int l = 0; l < 16; ++l) {
  1605. y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
  1606. }
  1607. #endif
  1608. x += QK_K;
  1609. }
  1610. }
  1611. #if QK_K == 256
  1612. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int64_t k) {
  1613. assert(k % QK_K == 0);
  1614. const int nb = k / QK_K;
  1615. const uint32_t kmask1 = 0x03030303;
  1616. const uint32_t kmask2 = 0x0f0f0f0f;
  1617. uint32_t aux[4];
  1618. const int8_t * scales = (const int8_t*)aux;
  1619. for (int i = 0; i < nb; i++) {
  1620. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1621. const uint8_t * restrict q = x[i].qs;
  1622. const uint8_t * restrict hm = x[i].hmask;
  1623. uint8_t m = 1;
  1624. memcpy(aux, x[i].scales, 12);
  1625. uint32_t tmp = aux[2];
  1626. aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  1627. aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  1628. aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  1629. aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
  1630. int is = 0;
  1631. float dl;
  1632. for (int n = 0; n < QK_K; n += 128) {
  1633. int shift = 0;
  1634. for (int j = 0; j < 4; ++j) {
  1635. dl = d_all * (scales[is++] - 32);
  1636. for (int l = 0; l < 16; ++l) {
  1637. *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
  1638. }
  1639. dl = d_all * (scales[is++] - 32);
  1640. for (int l = 0; l < 16; ++l) {
  1641. *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
  1642. }
  1643. shift += 2;
  1644. m <<= 1;
  1645. }
  1646. q += 32;
  1647. }
  1648. }
  1649. }
  1650. #else
  1651. void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int64_t k) {
  1652. assert(k % QK_K == 0);
  1653. assert(QK_K == 64);
  1654. const int nb = k / QK_K;
  1655. for (int i = 0; i < nb; i++) {
  1656. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  1657. const uint8_t * restrict q = x[i].qs;
  1658. const uint8_t * restrict hm = x[i].hmask;
  1659. const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
  1660. const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
  1661. const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
  1662. const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
  1663. for (int l=0; l<8; ++l) {
  1664. uint8_t h = hm[l];
  1665. y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
  1666. y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
  1667. y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
  1668. y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
  1669. y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
  1670. y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
  1671. y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
  1672. y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
  1673. }
  1674. y += QK_K;
  1675. }
  1676. }
  1677. #endif
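// q3_K reconstruction, as implemented above for the QK_K == 256 layout:
//     x ~= d_all * (sc - 32) * (q - (high_bit ? 0 : 4)),
// where q is a 2-bit value from qs[], the missing third bit comes from hmask[]
// (one bit position per run of QK_K/8 values), and sc is the 6-bit sub-block
// scale unpacked from the 12-byte scales[] field.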
  1678. void quantize_row_q3_K(const float * restrict x, void * restrict vy, int64_t k) {
  1679. quantize_row_q3_K_reference(x, vy, k);
  1680. }
  1681. static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int64_t n_per_row, const float * restrict quant_weights) {
  1682. #if QK_K != 256
  1683. (void)quant_weights;
  1684. quantize_row_q3_K_reference(x, y, n_per_row);
  1685. #else
  1686. assert(n_per_row % QK_K == 0);
  1687. const int nb = n_per_row / QK_K;
  1688. int8_t L[QK_K];
  1689. float scales[QK_K / 16];
  1690. float weight[16];
  1691. float sw[QK_K / 16];
  1692. int8_t Ls[QK_K / 16];
  1693. for (int i = 0; i < nb; i++) {
  1694. float sumx2 = 0;
  1695. for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
  1696. float sigma2 = 2*sumx2/QK_K;
  1697. for (int j = 0; j < QK_K/16; ++j) {
  1698. if (quant_weights) {
  1699. const float * qw = quant_weights + QK_K * i + 16*j;
  1700. for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
  1701. } else {
  1702. for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
  1703. }
  1704. float sumw = 0;
  1705. for (int l = 0; l < 16; ++l) sumw += weight[l];
  1706. sw[j] = sumw;
  1707. scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
  1708. }
  1709. memset(y[i].scales, 0, 12);
  1710. float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
  1711. for (int j = 0; j < QK_K/16; ++j) {
  1712. int l = Ls[j];
  1713. if (j < 8) {
  1714. y[i].scales[j] = l & 0xF;
  1715. } else {
  1716. y[i].scales[j-8] |= ((l & 0xF) << 4);
  1717. }
  1718. l >>= 4;
  1719. y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
  1720. }
  1721. y[i].d = GGML_FP32_TO_FP16(d_block);
  1722. int8_t sc;
  1723. for (int j = 0; j < QK_K/16; ++j) {
  1724. sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
  1725. sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
  1726. float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1727. if (!d) {
  1728. continue;
  1729. }
  1730. for (int ii = 0; ii < 16; ++ii) {
  1731. int l = nearest_int(x[16*j + ii]/d);
  1732. l = MAX(-4, MIN(3, l));
  1733. L[16*j + ii] = l + 4;
  1734. }
  1735. }
  1736. memset(y[i].hmask, 0, QK_K/8);
1737. // We put the high bit of the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc.
  1738. int m = 0;
  1739. uint8_t hm = 1;
  1740. for (int j = 0; j < QK_K; ++j) {
  1741. if (L[j] > 3) {
  1742. y[i].hmask[m] |= hm;
  1743. L[j] -= 4;
  1744. }
  1745. if (++m == QK_K/8) {
  1746. m = 0; hm <<= 1;
  1747. }
  1748. }
  1749. for (int j = 0; j < QK_K; j += 128) {
  1750. for (int l = 0; l < 32; ++l) {
  1751. y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
  1752. }
  1753. }
  1754. x += QK_K;
  1755. }
  1756. #endif
  1757. }
  1758. size_t quantize_q3_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1759. size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
  1760. if (!quant_weights) {
  1761. quantize_row_q3_K_reference(src, dst, (int64_t)nrow*n_per_row);
  1762. }
  1763. else {
  1764. char * qrow = (char *)dst;
  1765. for (int64_t row = 0; row < nrow; ++row) {
  1766. quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
  1767. src += n_per_row;
  1768. qrow += row_size;
  1769. }
  1770. }
  1771. return nrow * row_size;
  1772. }
  1773. // ====================== 4-bit (de)-quantization
  1774. void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int64_t k) {
  1775. assert(k % QK_K == 0);
  1776. const int nb = k / QK_K;
  1777. uint8_t L[QK_K];
  1778. uint8_t Laux[32];
  1779. float weights[32];
  1780. float mins[QK_K/32];
  1781. float scales[QK_K/32];
  1782. for (int i = 0; i < nb; i++) {
1783. float max_scale = 0; // as we are subtracting the min, scales are always positive
  1784. float max_min = 0;
  1785. for (int j = 0; j < QK_K/32; ++j) {
  1786. //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  1787. float sum_x2 = 0;
  1788. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  1789. float av_x = sqrtf(sum_x2/32);
  1790. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1791. scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
  1792. float scale = scales[j];
  1793. if (scale > max_scale) {
  1794. max_scale = scale;
  1795. }
  1796. float min = mins[j];
  1797. if (min > max_min) {
  1798. max_min = min;
  1799. }
  1800. }
  1801. #if QK_K == 256
  1802. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  1803. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  1804. for (int j = 0; j < QK_K/32; ++j) {
  1805. uint8_t ls = nearest_int(inv_scale*scales[j]);
  1806. uint8_t lm = nearest_int(inv_min*mins[j]);
  1807. ls = MIN(63, ls);
  1808. lm = MIN(63, lm);
  1809. if (j < 4) {
  1810. y[i].scales[j] = ls;
  1811. y[i].scales[j+4] = lm;
  1812. } else {
  1813. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1814. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1815. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1816. }
  1817. }
  1818. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  1819. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  1820. uint8_t sc, m;
  1821. for (int j = 0; j < QK_K/32; ++j) {
  1822. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1823. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1824. if (!d) continue;
  1825. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1826. for (int ii = 0; ii < 32; ++ii) {
  1827. int l = nearest_int((x[32*j + ii] + dm)/d);
  1828. l = MAX(0, MIN(15, l));
  1829. L[32*j + ii] = l;
  1830. }
  1831. }
  1832. #else
  1833. const float s_factor = 15.f;
  1834. float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
  1835. float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
  1836. int d1 = nearest_int(inv_scale*scales[0]);
  1837. int m1 = nearest_int(inv_min*mins[0]);
  1838. int d2 = nearest_int(inv_scale*scales[1]);
  1839. int m2 = nearest_int(inv_min*mins[1]);
  1840. y[i].scales[0] = d1 | (m1 << 4);
  1841. y[i].scales[1] = d2 | (m2 << 4);
  1842. y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
  1843. y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
  1844. float sumlx = 0;
  1845. int suml2 = 0;
  1846. for (int j = 0; j < QK_K/32; ++j) {
  1847. const uint8_t sd = y[i].scales[j] & 0xF;
  1848. const uint8_t sm = y[i].scales[j] >> 4;
  1849. const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
  1850. if (!d) continue;
  1851. const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
  1852. for (int ii = 0; ii < 32; ++ii) {
  1853. int l = nearest_int((x[32*j + ii] + m)/d);
  1854. l = MAX(0, MIN(15, l));
  1855. L[32*j + ii] = l;
  1856. sumlx += (x[32*j + ii] + m)*l*sd;
  1857. suml2 += l*l*sd*sd;
  1858. }
  1859. }
  1860. if (suml2) {
  1861. y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
  1862. }
  1863. #endif
  1864. uint8_t * q = y[i].qs;
  1865. for (int j = 0; j < QK_K; j += 64) {
  1866. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  1867. q += 32;
  1868. }
  1869. x += QK_K;
  1870. }
  1871. }
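// A q4_K super-block therefore reconstructs as x ~= d*sc_j*q - dmin*m_j with q in
// 0..15, where sc_j and m_j are the 6-bit sub-block scale and min recovered by
// get_scale_min_k4(); dequantize_row_q4_K() below decodes exactly this layout.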
  1872. void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int64_t k) {
  1873. assert(k % QK_K == 0);
  1874. const int nb = k / QK_K;
  1875. for (int i = 0; i < nb; i++) {
  1876. const uint8_t * q = x[i].qs;
  1877. #if QK_K == 256
  1878. const float d = GGML_FP16_TO_FP32(x[i].d);
  1879. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1880. int is = 0;
  1881. uint8_t sc, m;
  1882. for (int j = 0; j < QK_K; j += 64) {
  1883. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  1884. const float d1 = d * sc; const float m1 = min * m;
  1885. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  1886. const float d2 = d * sc; const float m2 = min * m;
  1887. for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
  1888. for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
  1889. q += 32; is += 2;
  1890. }
  1891. #else
  1892. const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
  1893. const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
  1894. const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
  1895. const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
  1896. for (int l = 0; l < 32; ++l) {
  1897. y[l+ 0] = d1 * (q[l] & 0xF) - m1;
  1898. y[l+32] = d2 * (q[l] >> 4) - m2;
  1899. }
  1900. y += QK_K;
  1901. #endif
  1902. }
  1903. }
  1904. void quantize_row_q4_K(const float * restrict x, void * restrict vy, int64_t k) {
  1905. assert(k % QK_K == 0);
  1906. block_q4_K * restrict y = vy;
  1907. quantize_row_q4_K_reference(x, y, k);
  1908. }
  1909. static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int64_t n_per_row, const float * quant_weights) {
  1910. #if QK_K != 256
  1911. (void)quant_weights;
  1912. quantize_row_q4_K_reference(x, y, n_per_row);
  1913. #else
  1914. assert(n_per_row % QK_K == 0);
  1915. const int64_t nb = n_per_row / QK_K;
  1916. uint8_t L[QK_K];
  1917. uint8_t Laux[32];
  1918. uint8_t Ls[QK_K/32];
  1919. uint8_t Lm[QK_K/32];
  1920. float weights[32];
  1921. float sw[QK_K/32];
  1922. float mins[QK_K/32];
  1923. float scales[QK_K/32];
  1924. for (int i = 0; i < nb; i++) {
  1925. float sum_x2 = 0;
  1926. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  1927. float sigma2 = 2*sum_x2/QK_K;
  1928. float av_x = sqrtf(sigma2);
  1929. for (int j = 0; j < QK_K/32; ++j) {
  1930. if (quant_weights) {
  1931. const float * qw = quant_weights + QK_K*i + 32*j;
  1932. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  1933. } else {
  1934. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1935. }
  1936. float sumw = 0;
  1937. for (int l = 0; l < 32; ++l) sumw += weights[l];
  1938. sw[j] = sumw;
  1939. scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1940. }
  1941. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  1942. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  1943. for (int j = 0; j < QK_K/32; ++j) {
  1944. uint8_t ls = Ls[j];
  1945. uint8_t lm = Lm[j];
  1946. if (j < 4) {
  1947. y[i].scales[j] = ls;
  1948. y[i].scales[j+4] = lm;
  1949. } else {
  1950. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1951. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1952. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1953. }
  1954. }
  1955. y[i].d = GGML_FP32_TO_FP16(d_block);
  1956. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  1957. uint8_t sc, m;
  1958. for (int j = 0; j < QK_K/32; ++j) {
  1959. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1960. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1961. if (!d) continue;
  1962. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1963. for (int ii = 0; ii < 32; ++ii) {
  1964. int l = nearest_int((x[32*j + ii] + dm)/d);
  1965. l = MAX(0, MIN(15, l));
  1966. L[32*j + ii] = l;
  1967. }
  1968. }
  1969. uint8_t * q = y[i].qs;
  1970. for (int j = 0; j < QK_K; j += 64) {
  1971. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  1972. q += 32;
  1973. }
  1974. x += QK_K;
  1975. }
  1976. #endif
  1977. }
  1978. size_t quantize_q4_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1979. size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
  1980. if (!quant_weights) {
  1981. quantize_row_q4_K_reference(src, dst, (int64_t)nrow*n_per_row);
  1982. }
  1983. else {
  1984. char * qrow = (char *)dst;
  1985. for (int64_t row = 0; row < nrow; ++row) {
  1986. quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
  1987. src += n_per_row;
  1988. qrow += row_size;
  1989. }
  1990. }
  1991. return nrow * row_size;
  1992. }
  1993. // ====================== 5-bit (de)-quantization
  1994. void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int64_t k) {
  1995. assert(k % QK_K == 0);
  1996. const int64_t nb = k / QK_K;
  1997. #if QK_K == 256
  1998. uint8_t L[QK_K];
  1999. float mins[QK_K/32];
  2000. float scales[QK_K/32];
  2001. float weights[32];
  2002. uint8_t Laux[32];
  2003. #else
  2004. int8_t L[QK_K];
  2005. float scales[QK_K/16];
  2006. #endif
  2007. for (int i = 0; i < nb; i++) {
  2008. #if QK_K == 256
2009. float max_scale = 0; // as we are subtracting the min, scales are always positive
  2010. float max_min = 0;
  2011. for (int j = 0; j < QK_K/32; ++j) {
  2012. //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  2013. float sum_x2 = 0;
  2014. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  2015. float av_x = sqrtf(sum_x2/32);
  2016. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2017. scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
  2018. float scale = scales[j];
  2019. if (scale > max_scale) {
  2020. max_scale = scale;
  2021. }
  2022. float min = mins[j];
  2023. if (min > max_min) {
  2024. max_min = min;
  2025. }
  2026. }
  2027. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  2028. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  2029. for (int j = 0; j < QK_K/32; ++j) {
  2030. uint8_t ls = nearest_int(inv_scale*scales[j]);
  2031. uint8_t lm = nearest_int(inv_min*mins[j]);
  2032. ls = MIN(63, ls);
  2033. lm = MIN(63, lm);
  2034. if (j < 4) {
  2035. y[i].scales[j] = ls;
  2036. y[i].scales[j+4] = lm;
  2037. } else {
  2038. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2039. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2040. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2041. }
  2042. }
  2043. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  2044. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  2045. uint8_t sc, m;
  2046. for (int j = 0; j < QK_K/32; ++j) {
  2047. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2048. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2049. if (!d) continue;
  2050. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2051. for (int ii = 0; ii < 32; ++ii) {
  2052. int l = nearest_int((x[32*j + ii] + dm)/d);
  2053. l = MAX(0, MIN(31, l));
  2054. L[32*j + ii] = l;
  2055. }
  2056. }
  2057. uint8_t * restrict qh = y[i].qh;
  2058. uint8_t * restrict ql = y[i].qs;
  2059. memset(qh, 0, QK_K/8);
  2060. uint8_t m1 = 1, m2 = 2;
  2061. for (int n = 0; n < QK_K; n += 64) {
  2062. for (int j = 0; j < 32; ++j) {
  2063. int l1 = L[n + j];
  2064. if (l1 > 15) {
  2065. l1 -= 16; qh[j] |= m1;
  2066. }
  2067. int l2 = L[n + j + 32];
  2068. if (l2 > 15) {
  2069. l2 -= 16; qh[j] |= m2;
  2070. }
  2071. ql[j] = l1 | (l2 << 4);
  2072. }
  2073. m1 <<= 2; m2 <<= 2;
  2074. ql += 32;
  2075. }
  2076. #else
  2077. float max_scale = 0, amax = 0;
  2078. for (int j = 0; j < QK_K/16; ++j) {
  2079. scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL);
  2080. float abs_scale = fabsf(scales[j]);
  2081. if (abs_scale > amax) {
  2082. amax = abs_scale;
  2083. max_scale = scales[j];
  2084. }
  2085. }
  2086. float iscale = -128.f/max_scale;
  2087. for (int j = 0; j < QK_K/16; ++j) {
  2088. int l = nearest_int(iscale*scales[j]);
  2089. y[i].scales[j] = MAX(-128, MIN(127, l));
  2090. }
  2091. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2092. for (int j = 0; j < QK_K/16; ++j) {
  2093. const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2094. if (!d) continue;
  2095. for (int ii = 0; ii < 16; ++ii) {
  2096. int l = nearest_int(x[16*j + ii]/d);
  2097. l = MAX(-16, MIN(15, l));
  2098. L[16*j + ii] = l + 16;
  2099. }
  2100. }
  2101. uint8_t * restrict qh = y[i].qh;
  2102. uint8_t * restrict ql = y[i].qs;
  2103. memset(qh, 0, QK_K/8);
  2104. for (int j = 0; j < 32; ++j) {
  2105. int jm = j%8;
  2106. int is = j/8;
  2107. int l1 = L[j];
  2108. if (l1 > 15) {
  2109. l1 -= 16; qh[jm] |= (1 << is);
  2110. }
  2111. int l2 = L[j + 32];
  2112. if (l2 > 15) {
  2113. l2 -= 16; qh[jm] |= (1 << (4 + is));
  2114. }
  2115. ql[j] = l1 | (l2 << 4);
  2116. }
  2117. #endif
  2118. x += QK_K;
  2119. }
  2120. }
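// q5_K keeps the low 4 bits of each 5-bit quant in qs[] (two values per byte) and
// gathers the 5th bits in qh[]: for QK_K == 256, bit k of qh[j] is the high bit of
// element j inside the k-th group of 32 values, which is what the shifting m1/m2
// masks above produce.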
  2121. void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int64_t k) {
  2122. assert(k % QK_K == 0);
  2123. const int64_t nb = k / QK_K;
  2124. for (int i = 0; i < nb; i++) {
  2125. const uint8_t * ql = x[i].qs;
  2126. const uint8_t * qh = x[i].qh;
  2127. #if QK_K == 256
  2128. const float d = GGML_FP16_TO_FP32(x[i].d);
  2129. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  2130. int is = 0;
  2131. uint8_t sc, m;
  2132. uint8_t u1 = 1, u2 = 2;
  2133. for (int j = 0; j < QK_K; j += 64) {
  2134. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  2135. const float d1 = d * sc; const float m1 = min * m;
  2136. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  2137. const float d2 = d * sc; const float m2 = min * m;
  2138. for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
  2139. for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
  2140. ql += 32; is += 2;
  2141. u1 <<= 2; u2 <<= 2;
  2142. }
  2143. #else
  2144. float d = GGML_FP16_TO_FP32(x[i].d);
  2145. const int8_t * restrict s = x[i].scales;
  2146. for (int l = 0; l < 8; ++l) {
  2147. y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
  2148. y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
  2149. y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
  2150. y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
  2151. y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
  2152. y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
  2153. y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
  2154. y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
  2155. }
  2156. y += QK_K;
  2157. #endif
  2158. }
  2159. }
  2160. void quantize_row_q5_K(const float * restrict x, void * restrict vy, int64_t k) {
  2161. assert(k % QK_K == 0);
  2162. block_q5_K * restrict y = vy;
  2163. quantize_row_q5_K_reference(x, y, k);
  2164. }
  2165. static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int64_t n_per_row, const float * quant_weights) {
  2166. #if QK_K != 256
  2167. (void)quant_weights;
  2168. quantize_row_q5_K_reference(x, y, n_per_row);
  2169. #else
  2170. assert(n_per_row % QK_K == 0);
  2171. const int64_t nb = n_per_row / QK_K;
  2172. uint8_t L[QK_K];
  2173. uint8_t Laux[32];
  2174. uint8_t Ls[QK_K/32];
  2175. uint8_t Lm[QK_K/32];
  2176. float mins[QK_K/32];
  2177. float scales[QK_K/32];
  2178. float sw[QK_K/32];
  2179. float weights[32];
  2180. for (int i = 0; i < nb; i++) {
  2181. float sum_x2 = 0;
  2182. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  2183. float sigma2 = 2*sum_x2/QK_K;
  2184. float av_x = sqrtf(sigma2);
  2185. for (int j = 0; j < QK_K/32; ++j) {
  2186. if (quant_weights) {
  2187. const float * qw = quant_weights + QK_K*i + 32*j;
  2188. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  2189. } else {
  2190. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  2191. }
  2192. float sumw = 0;
  2193. for (int l = 0; l < 32; ++l) sumw += weights[l];
  2194. sw[j] = sumw;
  2195. scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  2196. }
  2197. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  2198. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  2199. for (int j = 0; j < QK_K/32; ++j) {
  2200. uint8_t ls = Ls[j];
  2201. uint8_t lm = Lm[j];
  2202. ls = MIN(63, ls);
  2203. lm = MIN(63, lm);
  2204. if (j < 4) {
  2205. y[i].scales[j] = ls;
  2206. y[i].scales[j+4] = lm;
  2207. } else {
  2208. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  2209. y[i].scales[j-4] |= ((ls >> 4) << 6);
  2210. y[i].scales[j-0] |= ((lm >> 4) << 6);
  2211. }
  2212. }
  2213. y[i].d = GGML_FP32_TO_FP16(d_block);
  2214. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  2215. uint8_t sc, m;
  2216. for (int j = 0; j < QK_K/32; ++j) {
  2217. get_scale_min_k4(j, y[i].scales, &sc, &m);
  2218. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  2219. if (!d) continue;
  2220. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  2221. for (int ii = 0; ii < 32; ++ii) {
  2222. int l = nearest_int((x[32*j + ii] + dm)/d);
  2223. l = MAX(0, MIN(31, l));
  2224. L[32*j + ii] = l;
  2225. }
  2226. }
  2227. uint8_t * restrict qh = y[i].qh;
  2228. uint8_t * restrict ql = y[i].qs;
  2229. memset(qh, 0, QK_K/8);
  2230. uint8_t m1 = 1, m2 = 2;
  2231. for (int n = 0; n < QK_K; n += 64) {
  2232. for (int j = 0; j < 32; ++j) {
  2233. int l1 = L[n + j];
  2234. if (l1 > 15) {
  2235. l1 -= 16; qh[j] |= m1;
  2236. }
  2237. int l2 = L[n + j + 32];
  2238. if (l2 > 15) {
  2239. l2 -= 16; qh[j] |= m2;
  2240. }
  2241. ql[j] = l1 | (l2 << 4);
  2242. }
  2243. m1 <<= 2; m2 <<= 2;
  2244. ql += 32;
  2245. }
  2246. x += QK_K;
  2247. }
  2248. #endif
  2249. }
  2250. size_t quantize_q5_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2251. size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
  2252. if (!quant_weights) {
  2253. quantize_row_q5_K_reference(src, dst, (int64_t)nrow*n_per_row);
  2254. }
  2255. else {
  2256. char * qrow = (char *)dst;
  2257. for (int64_t row = 0; row < nrow; ++row) {
  2258. quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
  2259. src += n_per_row;
  2260. qrow += row_size;
  2261. }
  2262. }
  2263. return nrow * row_size;
  2264. }
  2265. // ====================== 6-bit (de)-quantization
  2266. void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int64_t k) {
  2267. assert(k % QK_K == 0);
  2268. const int64_t nb = k / QK_K;
  2269. int8_t L[QK_K];
  2270. float scales[QK_K/16];
  2271. for (int i = 0; i < nb; i++) {
  2272. float max_scale = 0;
  2273. float max_abs_scale = 0;
  2274. for (int ib = 0; ib < QK_K/16; ++ib) {
  2275. const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2276. scales[ib] = scale;
  2277. const float abs_scale = fabsf(scale);
  2278. if (abs_scale > max_abs_scale) {
  2279. max_abs_scale = abs_scale;
  2280. max_scale = scale;
  2281. }
  2282. }
  2283. if (max_abs_scale < GROUP_MAX_EPS) {
  2284. memset(&y[i], 0, sizeof(block_q6_K));
  2285. y[i].d = GGML_FP32_TO_FP16(0.f);
  2286. x += QK_K;
  2287. continue;
  2288. }
  2289. float iscale = -128.f/max_scale;
  2290. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2291. for (int ib = 0; ib < QK_K/16; ++ib) {
  2292. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2293. }
  2294. for (int j = 0; j < QK_K/16; ++j) {
  2295. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2296. if (!d) {
  2297. continue;
  2298. }
  2299. for (int ii = 0; ii < 16; ++ii) {
  2300. int l = nearest_int(x[16*j + ii]/d);
  2301. l = MAX(-32, MIN(31, l));
  2302. L[16*j + ii] = l + 32;
  2303. }
  2304. }
  2305. uint8_t * restrict ql = y[i].ql;
  2306. uint8_t * restrict qh = y[i].qh;
  2307. #if QK_K == 256
  2308. for (int j = 0; j < QK_K; j += 128) {
  2309. for (int l = 0; l < 32; ++l) {
  2310. const uint8_t q1 = L[j + l + 0] & 0xF;
  2311. const uint8_t q2 = L[j + l + 32] & 0xF;
  2312. const uint8_t q3 = L[j + l + 64] & 0xF;
  2313. const uint8_t q4 = L[j + l + 96] & 0xF;
  2314. ql[l+ 0] = q1 | (q3 << 4);
  2315. ql[l+32] = q2 | (q4 << 4);
  2316. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2317. }
  2318. ql += 64;
  2319. qh += 32;
  2320. }
  2321. #else
  2322. for (int l = 0; l < 32; ++l) {
  2323. const uint8_t q1 = L[l + 0] & 0xF;
  2324. const uint8_t q2 = L[l + 32] & 0xF;
  2325. ql[l] = q1 | (q2 << 4);
  2326. }
  2327. for (int l = 0; l < 16; ++l) {
  2328. qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
  2329. }
  2330. #endif
  2331. x += QK_K;
  2332. }
  2333. }
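// q6_K packing for QK_K == 256: the low 4 bits of each 6-bit quant go into ql[]
// (128 bytes) and the top 2 bits are gathered four-per-byte in qh[] (64 bytes);
// scales[] holds 16 signed 8-bit sub-block scales, so a value reconstructs as
//     x ~= d * scales[j] * (q - 32),   q in 0..63,
// which is what dequantize_row_q6_K() below computes.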
  2334. void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int64_t k) {
  2335. assert(k % QK_K == 0);
  2336. const int64_t nb = k / QK_K;
  2337. for (int i = 0; i < nb; i++) {
  2338. const float d = GGML_FP16_TO_FP32(x[i].d);
  2339. const uint8_t * restrict ql = x[i].ql;
  2340. const uint8_t * restrict qh = x[i].qh;
  2341. const int8_t * restrict sc = x[i].scales;
  2342. #if QK_K == 256
  2343. for (int n = 0; n < QK_K; n += 128) {
  2344. for (int l = 0; l < 32; ++l) {
  2345. int is = l/16;
  2346. const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2347. const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2348. const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2349. const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2350. y[l + 0] = d * sc[is + 0] * q1;
  2351. y[l + 32] = d * sc[is + 2] * q2;
  2352. y[l + 64] = d * sc[is + 4] * q3;
  2353. y[l + 96] = d * sc[is + 6] * q4;
  2354. }
  2355. y += 128;
  2356. ql += 64;
  2357. qh += 32;
  2358. sc += 8;
  2359. }
  2360. #else
  2361. for (int l = 0; l < 16; ++l) {
  2362. const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  2363. const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  2364. const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  2365. const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  2366. y[l+ 0] = d * sc[0] * q1;
  2367. y[l+16] = d * sc[1] * q2;
  2368. y[l+32] = d * sc[2] * q3;
  2369. y[l+48] = d * sc[3] * q4;
  2370. }
  2371. y += 64;
  2372. #endif
  2373. }
  2374. }
  2375. void quantize_row_q6_K(const float * restrict x, void * restrict vy, int64_t k) {
  2376. assert(k % QK_K == 0);
  2377. block_q6_K * restrict y = vy;
  2378. quantize_row_q6_K_reference(x, y, k);
  2379. }
  2380. static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int64_t n_per_row, const float * quant_weights) {
  2381. #if QK_K != 256
  2382. (void)quant_weights;
  2383. quantize_row_q6_K_reference(x, y, n_per_row);
  2384. #else
  2385. assert(n_per_row % QK_K == 0);
  2386. const int64_t nb = n_per_row / QK_K;
  2387. int8_t L[QK_K];
  2388. float scales[QK_K/16];
  2389. //float weights[16];
  2390. for (int i = 0; i < nb; i++) {
  2391. //float sum_x2 = 0;
  2392. //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
  2393. //float sigma2 = sum_x2/QK_K;
  2394. float max_scale = 0;
  2395. float max_abs_scale = 0;
  2396. for (int ib = 0; ib < QK_K/16; ++ib) {
  2397. float scale;
  2398. if (quant_weights) {
  2399. const float * qw = quant_weights + QK_K*i + 16*ib;
  2400. //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
  2401. //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
  2402. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
  2403. } else {
  2404. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  2405. }
  2406. scales[ib] = scale;
  2407. const float abs_scale = fabsf(scale);
  2408. if (abs_scale > max_abs_scale) {
  2409. max_abs_scale = abs_scale;
  2410. max_scale = scale;
  2411. }
  2412. }
  2413. if (max_abs_scale < GROUP_MAX_EPS) {
  2414. memset(&y[i], 0, sizeof(block_q6_K));
  2415. y[i].d = GGML_FP32_TO_FP16(0.f);
  2416. x += QK_K;
  2417. continue;
  2418. }
  2419. float iscale = -128.f/max_scale;
  2420. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  2421. for (int ib = 0; ib < QK_K/16; ++ib) {
  2422. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  2423. }
  2424. for (int j = 0; j < QK_K/16; ++j) {
  2425. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  2426. if (!d) {
  2427. continue;
  2428. }
  2429. for (int ii = 0; ii < 16; ++ii) {
  2430. int l = nearest_int(x[16*j + ii]/d);
  2431. l = MAX(-32, MIN(31, l));
  2432. L[16*j + ii] = l + 32;
  2433. }
  2434. }
  2435. uint8_t * restrict ql = y[i].ql;
  2436. uint8_t * restrict qh = y[i].qh;
  2437. for (int j = 0; j < QK_K; j += 128) {
  2438. for (int l = 0; l < 32; ++l) {
  2439. const uint8_t q1 = L[j + l + 0] & 0xF;
  2440. const uint8_t q2 = L[j + l + 32] & 0xF;
  2441. const uint8_t q3 = L[j + l + 64] & 0xF;
  2442. const uint8_t q4 = L[j + l + 96] & 0xF;
  2443. ql[l+ 0] = q1 | (q3 << 4);
  2444. ql[l+32] = q2 | (q4 << 4);
  2445. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  2446. }
  2447. ql += 64;
  2448. qh += 32;
  2449. }
  2450. x += QK_K;
  2451. }
  2452. #endif
  2453. }
  2454. size_t quantize_q6_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2455. size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
  2456. if (!quant_weights) {
  2457. quantize_row_q6_K_reference(src, dst, (int64_t)nrow*n_per_row);
  2458. }
  2459. else {
  2460. char * qrow = (char *)dst;
  2461. for (int64_t row = 0; row < nrow; ++row) {
  2462. quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
  2463. src += n_per_row;
  2464. qrow += row_size;
  2465. }
  2466. }
  2467. return nrow * row_size;
  2468. }
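// The quantize_row_qX_Y_impl() helpers below add importance weighting to the
// legacy formats (q4_0/q4_1/q5_0/q5_1): when quant_weights is given, each value
// is weighted by qw[j]*sqrtf(sigma2 + x[j]*x[j]) with sigma2 the mean square of
// the whole row, and the per-block scale (plus min for the _1 variants) is fitted
// with make_qx_quants() or make_qkx3_quants(); without quant_weights they fall
// back to the reference row quantizers.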
  2469. static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int64_t n_per_row, const float * quant_weights) {
  2470. static_assert(QK4_0 == 32, "QK4_0 must be 32");
  2471. if (!quant_weights) {
  2472. quantize_row_q4_0_reference(x, y, n_per_row);
  2473. return;
  2474. }
  2475. float weight[QK4_0];
  2476. int8_t L[QK4_0];
  2477. float sum_x2 = 0;
  2478. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2479. float sigma2 = sum_x2/n_per_row;
  2480. const int64_t nb = n_per_row/QK4_0;
  2481. for (int ib = 0; ib < nb; ++ib) {
  2482. const float * xb = x + QK4_0 * ib;
  2483. const float * qw = quant_weights + QK4_0 * ib;
  2484. for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2485. float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
  2486. y[ib].d = GGML_FP32_TO_FP16(d);
  2487. for (int j = 0; j < 16; ++j) {
  2488. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2489. }
  2490. }
  2491. }
  2492. size_t quantize_q4_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2493. if (!quant_weights) {
  2494. quantize_row_q4_0_reference(src, dst, (int64_t)nrow*n_per_row);
  2495. return nrow * ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  2496. }
  2497. size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  2498. char * qrow = (char *)dst;
  2499. for (int64_t row = 0; row < nrow; ++row) {
  2500. quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
  2501. src += n_per_row;
  2502. qrow += row_size;
  2503. }
  2504. return nrow * row_size;
  2505. }
  2506. static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int64_t n_per_row, const float * quant_weights) {
  2507. static_assert(QK4_1 == 32, "QK4_1 must be 32");
  2508. if (!quant_weights) {
  2509. quantize_row_q4_1_reference(x, y, n_per_row);
  2510. return;
  2511. }
  2512. float weight[QK4_1];
  2513. uint8_t L[QK4_1], Laux[QK4_1];
  2514. float sum_x2 = 0;
  2515. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2516. float sigma2 = sum_x2/n_per_row;
  2517. const int64_t nb = n_per_row/QK4_1;
  2518. for (int ib = 0; ib < nb; ++ib) {
  2519. const float * xb = x + QK4_1 * ib;
  2520. const float * qw = quant_weights + QK4_1 * ib;
  2521. for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2522. float min;
  2523. float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2524. y[ib].d = GGML_FP32_TO_FP16(d);
  2525. y[ib].m = GGML_FP32_TO_FP16(-min);
  2526. for (int j = 0; j < 16; ++j) {
  2527. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  2528. }
  2529. }
  2530. }
  2531. size_t quantize_q4_1(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2532. if (!quant_weights) {
  2533. quantize_row_q4_1_reference(src, dst, (int64_t)nrow*n_per_row);
  2534. return nrow * ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  2535. }
  2536. size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  2537. char * qrow = (char *)dst;
  2538. for (int64_t row = 0; row < nrow; ++row) {
  2539. quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
  2540. src += n_per_row;
  2541. qrow += row_size;
  2542. }
  2543. return nrow * row_size;
  2544. }
  2545. static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int64_t n_per_row, const float * quant_weights) {
  2546. static_assert(QK5_0 == 32, "QK5_0 must be 32");
  2547. if (!quant_weights) {
  2548. quantize_row_q5_0_reference(x, y, n_per_row);
  2549. return;
  2550. }
  2551. float weight[QK5_0];
  2552. int8_t L[QK5_0];
  2553. float sum_x2 = 0;
  2554. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2555. float sigma2 = sum_x2/n_per_row;
  2556. const int64_t nb = n_per_row/QK5_0;
  2557. for (int ib = 0; ib < nb; ++ib) {
  2558. const float * xb = x + QK5_0 * ib;
  2559. const float * qw = quant_weights + QK5_0 * ib;
  2560. for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2561. float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
  2562. y[ib].d = GGML_FP32_TO_FP16(d);
  2563. uint32_t qh = 0;
  2564. for (int j = 0; j < 16; ++j) {
  2565. const uint8_t xi0 = L[j];
  2566. const uint8_t xi1 = L[j+16];
  2567. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
2568. // take the 5th bit and store it in qh at the right position
  2569. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  2570. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
  2571. }
  2572. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2573. }
  2574. }
  2575. size_t quantize_q5_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2576. if (!quant_weights) {
  2577. quantize_row_q5_0_reference(src, dst, (int64_t)nrow*n_per_row);
  2578. return nrow * ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  2579. }
  2580. size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  2581. char * qrow = (char *)dst;
  2582. for (int64_t row = 0; row < nrow; ++row) {
  2583. quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
  2584. src += n_per_row;
  2585. qrow += row_size;
  2586. }
  2587. return nrow * row_size;
  2588. }
  2589. static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int64_t n_per_row, const float * quant_weights) {
  2590. static_assert(QK5_1 == 32, "QK5_1 must be 32");
  2591. if (!quant_weights) {
  2592. quantize_row_q5_1_reference(x, y, n_per_row);
  2593. return;
  2594. }
  2595. float weight[QK5_1];
  2596. uint8_t L[QK5_1], Laux[QK5_1];
  2597. float sum_x2 = 0;
  2598. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  2599. float sigma2 = sum_x2/n_per_row;
  2600. const int64_t nb = n_per_row/QK5_1;
  2601. for (int ib = 0; ib < nb; ++ib) {
  2602. const float * xb = x + QK5_1 * ib;
  2603. const float * qw = quant_weights + QK5_1 * ib;
  2604. for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  2605. float min;
  2606. float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  2607. y[ib].d = GGML_FP32_TO_FP16(d);
  2608. y[ib].m = GGML_FP32_TO_FP16(-min);
  2609. uint32_t qh = 0;
  2610. for (int j = 0; j < 16; ++j) {
  2611. const uint8_t xi0 = L[j];
  2612. const uint8_t xi1 = L[j+16];
  2613. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
2614. // take the 5th bit and store it in qh at the right position
  2615. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
2616. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
  2617. }
  2618. memcpy(&y[ib].qh, &qh, sizeof(qh));
  2619. }
  2620. }
  2621. size_t quantize_q5_1(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2622. if (!quant_weights) {
  2623. quantize_row_q5_1_reference(src, dst, (int64_t)nrow*n_per_row);
  2624. return nrow * ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  2625. }
  2626. size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  2627. char * qrow = (char *)dst;
  2628. for (int64_t row = 0; row < nrow; ++row) {
  2629. quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
  2630. src += n_per_row;
  2631. qrow += row_size;
  2632. }
  2633. return nrow * row_size;
  2634. }
  2635. size_t quantize_q8_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2636. (void)quant_weights; // not used
  2637. const size_t row_size = ggml_row_size(GGML_TYPE_Q8_0, n_per_row);
  2638. quantize_row_q8_0_reference(src, dst, (int64_t)nrow*n_per_row);
  2639. return nrow * row_size;
  2640. }
  2641. // ====================== "True" 2-bit (de)-quantization
  2642. void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int64_t k) {
  2643. assert(k % QK_K == 0);
  2644. const int64_t nb = k / QK_K;
  2645. uint32_t aux32[2];
  2646. const uint8_t * aux8 = (const uint8_t *)aux32;
  2647. for (int i = 0; i < nb; i++) {
  2648. const float d = GGML_FP16_TO_FP32(x[i].d);
  2649. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  2650. memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
  2651. const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
  2652. for (int l = 0; l < 4; ++l) {
  2653. const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
  2654. const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
  2655. for (int j = 0; j < 8; ++j) {
  2656. y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  2657. }
  2658. y += 8;
  2659. }
  2660. }
  2661. }
  2662. }
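// iq2_xxs decoding, as above: each byte of qs selects a row of 8 values from the
// 256-entry iq2xxs_grid codebook, per-value signs come from ksigns_iq2xs via a
// 7-bit index packed in the second 32-bit word, and the top 4 bits of that word
// give the sub-block scale as d*(0.5 + nibble)*0.25.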
  2663. // ====================== 2.3125 bpw (de)-quantization
  2664. void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int64_t k) {
  2665. assert(k % QK_K == 0);
  2666. const int64_t nb = k / QK_K;
  2667. float db[2];
  2668. for (int i = 0; i < nb; i++) {
  2669. const float d = GGML_FP16_TO_FP32(x[i].d);
  2670. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  2671. db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
  2672. db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
  2673. for (int l = 0; l < 4; ++l) {
  2674. const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
  2675. const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
  2676. for (int j = 0; j < 8; ++j) {
  2677. y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  2678. }
  2679. y += 8;
  2680. }
  2681. }
  2682. }
  2683. }
  2684. // ====================== 2.5625 bpw (de)-quantization
void dequantize_row_iq2_s(const block_iq2_s * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    float db[2];

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint8_t * qh = x[i].qh;
        const uint8_t * signs = qs + QK_K/8;

        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
            db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
            for (int l = 0; l < 4; ++l) {
                const float dl = db[l/2];
                const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
                for (int j = 0; j < 8; ++j) {
                    y[j] = dl * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1.f : 1.f);
                }
                y += 8;
            }
            qs += 4;
            signs += 4;
        }
    }
}

// ====================== 3.0625 bpw (de)-quantization
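// Note: qs stores one byte-sized index into iq3xxs_grid per 4 weights; the second half of
// the data (scales_and_signs) holds one 32-bit word per 32 values with four 7-bit sign
// indices and a 4-bit sub-block scale in the top nibble, mirroring the iq2_xxs layout.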
void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    uint32_t aux32;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint8_t * scales_and_signs = qs + QK_K/4;

        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
            const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
            for (int l = 0; l < 4; ++l) {
                const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
                for (int j = 0; j < 4; ++j) {
                    y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
                    y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
                }
                y += 8;
            }
            qs += 8;
        }
    }
}

// ====================== 3.3125 bpw (de)-quantization
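// Note: iq3_s extends each 8-bit qs index with one high bit taken from qh to address the
// 512-entry iq3s_grid, stores its sign masks explicitly, and uses two 4-bit scales per byte
// mapped as d * (1 + 2*scale) without the 0.25f/0.5f factors of the *_xxs variants.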
void dequantize_row_iq3_s(const block_iq3_s * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint8_t * qh = x[i].qh;
        const uint8_t * signs = x[i].signs;

        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            const float db1 = d * (1 + 2*(x[i].scales[ib32/2] & 0xf));
            const float db2 = d * (1 + 2*(x[i].scales[ib32/2] >> 4));
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[0] << (8-2*l)) & 256)));
                const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[0] << (7-2*l)) & 256)));
                for (int j = 0; j < 4; ++j) {
                    y[j+0] = db1 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
                    y[j+4] = db1 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
                }
                y += 8;
            }
            qs += 8;
            signs += 4;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[1] << (8-2*l)) & 256)));
                const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[1] << (7-2*l)) & 256)));
                for (int j = 0; j < 4; ++j) {
                    y[j+0] = db2 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
                    y[j+4] = db2 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
                }
                y += 8;
            }
            qh += 2;
            qs += 8;
            signs += 4;
        }
    }
}

// ====================== 1.5625 bpw (de)-quantization
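// Note: for iq1_s each 16-bit qh entry carries, per 32 values, three extra index bits for
// each of the four 8-bit qs indices (giving an 11-bit index into iq1s_grid), a 3-bit scale
// in bits 12-14, and a sign bit 15 selecting the +/-IQ1S_DELTA offset added to every weight.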
void dequantize_row_iq1_s(const block_iq1_s * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const uint8_t * qs = x[i].qs;
        const uint16_t * qh = x[i].qh;

        for (int ib = 0; ib < QK_K/32; ++ib) {
            const float dl = d * (2*((qh[ib] >> 12) & 7) + 1);
            const float delta = qh[ib] & 0x8000 ? -IQ1S_DELTA : IQ1S_DELTA;
            for (int l = 0; l < 4; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
                for (int j = 0; j < 8; ++j) {
                    y[j] = dl * (grid[j] + delta);
                }
                y += 8;
            }
            qs += 4;
        }
    }
}
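// Note: iq1_m reassembles its fp16 super-block scale from the top 4 bits of the four 16-bit
// scale words (except for QK_K == 64, which stores d directly), uses 3-bit sub-block scales
// (4-bit when QK_K == 64), and picks a per-8-value +/-IQ1S_DELTA offset from bits 3 and 7 of
// each qh byte.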
void dequantize_row_iq1_m(const block_iq1_m * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    float delta[4];
    uint16_t idx[4];

#if QK_K != 64
    iq1m_scale_t scale;
#endif

    for (int i = 0; i < nb; i++) {
        const uint16_t * sc = (const uint16_t *)x[i].scales;
#if QK_K == 64
        const float d = GGML_FP16_TO_FP32(x[i].d);
#else
        scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
        const float d = GGML_FP16_TO_FP32(scale.f16);
#endif
        const uint8_t * qs = x[i].qs;
        const uint8_t * qh = x[i].qh;

        for (int ib = 0; ib < QK_K/32; ++ib) {
#if QK_K == 64
            const float dl1 = d * (2*((sc[ib/2] >> (8*(ib%2)+0)) & 0xf) + 1);
            const float dl2 = d * (2*((sc[ib/2] >> (8*(ib%2)+4)) & 0xf) + 1);
#else
            const float dl1 = d * (2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1);
            const float dl2 = d * (2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1);
#endif
            idx[0] = qs[0] | ((qh[0] << 8) & 0x700);
            idx[1] = qs[1] | ((qh[0] << 4) & 0x700);
            idx[2] = qs[2] | ((qh[1] << 8) & 0x700);
            idx[3] = qs[3] | ((qh[1] << 4) & 0x700);
            delta[0] = qh[0] & 0x08 ? -IQ1S_DELTA : IQ1S_DELTA;
            delta[1] = qh[0] & 0x80 ? -IQ1S_DELTA : IQ1S_DELTA;
            delta[2] = qh[1] & 0x08 ? -IQ1S_DELTA : IQ1S_DELTA;
            delta[3] = qh[1] & 0x80 ? -IQ1S_DELTA : IQ1S_DELTA;
            for (int l = 0; l < 2; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
                for (int j = 0; j < 8; ++j) {
                    y[j] = dl1 * (grid[j] + delta[l]);
                }
                y += 8;
            }
            for (int l = 2; l < 4; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
                for (int j = 0; j < 8; ++j) {
                    y[j] = dl2 * (grid[j] + delta[l]);
                }
                y += 8;
            }
            qs += 4;
            qh += 2;
        }
    }
}
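// Non-uniform 4-bit codebook shared by IQ4_NL and IQ4_XS: each nibble indexes one of these
// 16 int8 levels, which is then multiplied by the block (IQ4_NL) or sub-block (IQ4_XS) scale.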
static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};

void dequantize_row_iq4_nl(const block_iq4_nl * restrict x, float * restrict y, int64_t k) {
    assert(k % QK4_NL == 0);
    const int64_t nb = k / QK4_NL;

    for (int i = 0; i < nb; i++) {
        const uint8_t * qs = x[i].qs;
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int j = 0; j < QK4_NL/2; ++j) {
            y[j+ 0] = d * kvalues_iq4nl[qs[j] & 0xf];
            y[j+QK4_NL/2] = d * kvalues_iq4nl[qs[j] >> 4];
        }
        y += QK4_NL;
        qs += QK4_NL/2;
    }
}

void dequantize_row_iq4_xs(const block_iq4_xs * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
#if QK_K == 64
    dequantize_row_iq4_nl((const block_iq4_nl *)x, y, k);
#else
    const int64_t nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const uint8_t * qs = x[i].qs;
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int ib = 0; ib < QK_K/32; ++ib) {
            const int ls = ((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4);
            const float dl = d * (ls - 32);
            for (int j = 0; j < 16; ++j) {
                y[j+ 0] = dl * kvalues_iq4nl[qs[j] & 0xf];
                y[j+16] = dl * kvalues_iq4nl[qs[j] >> 4];
            }
            y += 32;
            qs += 16;
        }
    }
#endif
}

//===================================== Q8_K ==============================================
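// Q8_K is the 8-bit format used for the activation (right-hand) side of the k-quant and
// i-quant dot products: one float scale per super-block of QK_K values plus per-16 partial
// sums (bsums) that let those dot products account for block minima without a second pass.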
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        float max = 0;
        float amax = 0;
        for (int j = 0; j < QK_K; ++j) {
            float ax = fabsf(x[j]);
            if (ax > amax) {
                amax = ax; max = x[j];
            }
        }
        if (!amax) {
            y[i].d = 0;
            memset(y[i].qs, 0, QK_K);
            x += QK_K;
            continue;
        }
        //const float iscale = -128.f/max;
        // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward
        const float iscale = -127.f/max;
        for (int j = 0; j < QK_K; ++j) {
            int v = nearest_int(iscale*x[j]);
            y[i].qs[j] = MIN(127, v);
        }
        for (int j = 0; j < QK_K/16; ++j) {
            int sum = 0;
            for (int ii = 0; ii < 16; ++ii) {
                sum += y[i].qs[j*16 + ii];
            }
            y[i].bsums[j] = sum;
        }
        y[i].d = 1/iscale;
        x += QK_K;
    }
}

void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int64_t nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        for (int j = 0; j < QK_K; ++j) {
            *y++ = x[i].d * x[i].qs[j];
        }
    }
}

void quantize_row_q8_K(const float * restrict x, void * restrict y, int64_t k) {
    quantize_row_q8_K_reference(x, y, k);
}
//===================================== Dot products =================================
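// Each ggml_vec_dot_* below selects one SIMD implementation at compile time (NEON,
// AVX2/AVX/SSSE3, RISC-V vector, POWER9, WASM SIMD where available) and ends with a plain
// scalar loop; that scalar branch is the reference definition of what every path computes.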
  2936. //
  2937. // Helper functions
  2938. //
  2939. #if __AVX__ || __AVX2__ || __AVX512F__
  2940. // shuffles to pick the required scales in dot products
  2941. static inline __m256i get_scale_shuffle_q3k(int i) {
  2942. static const uint8_t k_shuffle[128] = {
  2943. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  2944. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  2945. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  2946. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
  2947. };
  2948. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  2949. }
  2950. static inline __m256i get_scale_shuffle_k4(int i) {
  2951. static const uint8_t k_shuffle[256] = {
  2952. 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
  2953. 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
  2954. 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
  2955. 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
  2956. 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
  2957. 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
  2958. 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
  2959. 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
  2960. };
  2961. return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
  2962. }
  2963. static inline __m128i get_scale_shuffle(int i) {
  2964. static const uint8_t k_shuffle[128] = {
  2965. 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
  2966. 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
  2967. 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
  2968. 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
  2969. 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
  2970. 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
  2971. 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
  2972. 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
  2973. };
  2974. return _mm_loadu_si128((const __m128i*)k_shuffle + i);
  2975. }
  2976. #endif
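// Dot product of a q4_0 row with a q8_0 row. Every branch computes the same result as the
// scalar fallback at the end of the function:
//   sum over blocks of d_x * d_y * sum_j (x_j - 8) * y_j
// where x_j are the unpacked 4-bit quants and y_j the 8-bit quants.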
  2977. void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  2978. const int qk = QK8_0;
  2979. const int nb = n / qk;
  2980. assert(n % qk == 0);
  2981. #if defined(__ARM_FEATURE_MATMUL_INT8)
  2982. assert((nrc == 2) || (nrc == 1));
  2983. #else
  2984. assert(nrc == 1);
  2985. #endif
  2986. UNUSED(nrc);
  2987. UNUSED(bx);
  2988. UNUSED(by);
  2989. UNUSED(bs);
  2990. const block_q4_0 * restrict x = vx;
  2991. const block_q8_0 * restrict y = vy;
  2992. #if defined(__ARM_FEATURE_MATMUL_INT8)
  2993. if (nrc == 2) {
  2994. const block_q4_0 * restrict vx0 = vx;
  2995. const block_q4_0 * restrict vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx);
  2996. const block_q8_0 * restrict vy0 = vy;
  2997. const block_q8_0 * restrict vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by);
  2998. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2999. for (int i = 0; i < nb; i++) {
  3000. const block_q4_0 * restrict b_x0 = &vx0[i];
  3001. const block_q4_0 * restrict b_x1 = &vx1[i];
  3002. const block_q8_0 * restrict b_y0 = &vy0[i];
  3003. const block_q8_0 * restrict b_y1 = &vy1[i];
  3004. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3005. const int8x16_t s8b = vdupq_n_s8(0x8);
  3006. const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
  3007. const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
  3008. // 4-bit -> 8-bit
  3009. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3010. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3011. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3012. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3013. // sub 8
  3014. const int8x16_t x0_l = vsubq_s8(v0_0l, s8b);
  3015. const int8x16_t x0_h = vsubq_s8(v0_0h, s8b);
  3016. const int8x16_t x1_l = vsubq_s8(v0_1l, s8b);
  3017. const int8x16_t x1_h = vsubq_s8(v0_1h, s8b);
  3018. // load y
  3019. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  3020. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  3021. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  3022. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  3023. float32_t _scale[4] = { GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  3024. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  3025. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  3026. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  3027. float32x4_t scale = vld1q_f32(_scale);
  3028. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3029. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3030. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3031. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3032. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3033. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3034. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3035. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3036. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  3037. l1, r1)), l2, r2)), l3, r3))), scale);
  3038. }
  3039. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  3040. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  3041. vst1_f32(s, vget_low_f32(sumv2));
  3042. vst1_f32(s + bs, vget_high_f32(sumv2));
  3043. return;
  3044. }
  3045. #endif
  3046. #if defined(__ARM_NEON)
  3047. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3048. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3049. assert(nb % 2 == 0); // TODO: handle odd nb
  3050. for (int i = 0; i < nb; i += 2) {
  3051. const block_q4_0 * restrict x0 = &x[i + 0];
  3052. const block_q4_0 * restrict x1 = &x[i + 1];
  3053. const block_q8_0 * restrict y0 = &y[i + 0];
  3054. const block_q8_0 * restrict y1 = &y[i + 1];
  3055. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3056. const int8x16_t s8b = vdupq_n_s8(0x8);
  3057. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3058. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3059. // 4-bit -> 8-bit
  3060. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3061. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3062. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3063. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3064. // sub 8
  3065. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  3066. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  3067. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  3068. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  3069. // load y
  3070. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3071. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3072. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3073. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3074. // dot product into int32x4_t
  3075. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
  3076. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
  3077. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3078. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3079. }
  3080. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3081. #elif defined(__AVX2__)
  3082. // Initialize accumulator with zeros
  3083. __m256 acc = _mm256_setzero_ps();
  3084. // Main loop
  3085. for (int i = 0; i < nb; ++i) {
  3086. /* Compute combined scale for the block */
  3087. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3088. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3089. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  3090. const __m256i off = _mm256_set1_epi8( 8 );
  3091. qx = _mm256_sub_epi8( qx, off );
  3092. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3093. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  3094. /* Multiply q with scale and accumulate */
  3095. acc = _mm256_fmadd_ps( d, q, acc );
  3096. }
  3097. *s = hsum_float_8(acc);
  3098. #elif defined(__AVX__)
  3099. // Initialize accumulator with zeros
  3100. __m256 acc = _mm256_setzero_ps();
  3101. // Main loop
  3102. for (int i = 0; i < nb; ++i) {
  3103. // Compute combined scale for the block
  3104. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3105. const __m128i lowMask = _mm_set1_epi8(0xF);
  3106. const __m128i off = _mm_set1_epi8(8);
  3107. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  3108. __m128i bx_0 = _mm_and_si128(lowMask, tmp);
  3109. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  3110. bx_0 = _mm_sub_epi8(bx_0, off);
  3111. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3112. bx_0 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  3113. by_0 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3114. bx_0 = _mm_sub_epi8(bx_0, off);
  3115. const __m128i i32_1 = mul_sum_i8_pairs(bx_0, by_0);
  3116. // Convert int32_t to float
  3117. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  3118. // Apply the scale, and accumulate
  3119. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  3120. }
  3121. *s = hsum_float_8(acc);
  3122. #elif defined(__SSSE3__)
  3123. // set constants
  3124. const __m128i lowMask = _mm_set1_epi8(0xF);
  3125. const __m128i off = _mm_set1_epi8(8);
  3126. // Initialize accumulator with zeros
  3127. __m128 acc_0 = _mm_setzero_ps();
  3128. __m128 acc_1 = _mm_setzero_ps();
  3129. __m128 acc_2 = _mm_setzero_ps();
  3130. __m128 acc_3 = _mm_setzero_ps();
  3131. // First round without accumulation
  3132. {
  3133. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  3134. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
  3135. // Compute combined scale for the block 0 and 1
  3136. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  3137. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  3138. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3139. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  3140. bx_0 = _mm_sub_epi8(bx_0, off);
  3141. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3142. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3143. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  3144. bx_1 = _mm_sub_epi8(bx_1, off);
  3145. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3146. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  3147. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
  3148. // Compute combined scale for the block 2 and 3
  3149. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  3150. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  3151. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3152. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  3153. bx_2 = _mm_sub_epi8(bx_2, off);
  3154. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  3155. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  3156. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  3157. bx_3 = _mm_sub_epi8(bx_3, off);
  3158. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  3159. // Convert int32_t to float
  3160. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  3161. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  3162. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  3163. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  3164. // Apply the scale
  3165. acc_0 = _mm_mul_ps( d_0_1, p0 );
  3166. acc_1 = _mm_mul_ps( d_0_1, p1 );
  3167. acc_2 = _mm_mul_ps( d_2_3, p2 );
  3168. acc_3 = _mm_mul_ps( d_2_3, p3 );
  3169. }
  3170. assert(nb % 2 == 0); // TODO: handle odd nb
  3171. // Main loop
  3172. for (int i = 2; i < nb; i+=2) {
  3173. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  3174. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
  3175. // Compute combined scale for the block 0 and 1
  3176. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  3177. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  3178. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  3179. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  3180. bx_0 = _mm_sub_epi8(bx_0, off);
  3181. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  3182. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  3183. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  3184. bx_1 = _mm_sub_epi8(bx_1, off);
  3185. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  3186. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  3187. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
  3188. // Compute combined scale for the block 2 and 3
  3189. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  3190. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  3191. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  3192. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  3193. bx_2 = _mm_sub_epi8(bx_2, off);
  3194. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  3195. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  3196. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  3197. bx_3 = _mm_sub_epi8(bx_3, off);
  3198. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  3199. // Convert int32_t to float
  3200. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  3201. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  3202. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  3203. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  3204. // Apply the scale
  3205. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  3206. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  3207. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  3208. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
// Accumulate
  3210. acc_0 = _mm_add_ps(p0_d, acc_0);
  3211. acc_1 = _mm_add_ps(p1_d, acc_1);
  3212. acc_2 = _mm_add_ps(p2_d, acc_2);
  3213. acc_3 = _mm_add_ps(p3_d, acc_3);
  3214. }
  3215. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  3216. #elif defined(__riscv_v_intrinsic)
  3217. float sumf = 0.0;
  3218. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3219. for (int i = 0; i < nb; i++) {
  3220. // load elements
  3221. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3222. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3223. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3224. // mask and store lower part of x, and then upper part
  3225. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3226. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3227. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3228. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3229. // subtract offset
  3230. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
  3231. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
  3232. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3233. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3234. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3235. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3236. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3237. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3238. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  3239. }
  3240. *s = sumf;
  3241. #elif defined(__POWER9_VECTOR__)
  3242. const vector signed char lowMask = vec_splats((signed char)0xF);
  3243. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  3244. const vector signed char v8 = vec_splats((signed char)0x8);
  3245. vector float vsumf0 = vec_splats(0.0f);
  3246. #pragma GCC unroll 4
  3247. for (int i = 0; i < nb; i++) {
  3248. __builtin_prefetch(x[i].qs, 0, 1);
  3249. __builtin_prefetch(y[i].qs, 0, 1);
  3250. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  3251. vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d));
  3252. vector float vd = vec_mul(vxd, vyd);
  3253. vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs);
  3254. vector signed char q8y0 = vec_xl( 0, y[i].qs);
  3255. vector signed char q8y1 = vec_xl(16, y[i].qs);
  3256. vector signed char q4x0 = vec_and(qxs, lowMask);
  3257. vector signed char q4x1 = vec_sr(qxs, v4);
  3258. q4x0 = vec_sub(q4x0, v8);
  3259. q4x1 = vec_sub(q4x1, v8);
  3260. vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
  3261. vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
  3262. qv0 = vec_add(qv0, qv1);
  3263. vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
  3264. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  3265. }
  3266. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  3267. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  3268. *s = vec_extract(vsumf0, 0);
  3269. #else
  3270. // scalar
  3271. float sumf = 0.0;
  3272. for (int i = 0; i < nb; i++) {
  3273. int sumi = 0;
  3274. for (int j = 0; j < qk/2; ++j) {
  3275. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  3276. const int v1 = (x[i].qs[j] >> 4) - 8;
  3277. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  3278. }
  3279. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  3280. }
  3281. *s = sumf;
  3282. #endif
  3283. }
  3284. void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3285. const int qk = QK8_1;
  3286. const int nb = n / qk;
  3287. assert(n % qk == 0);
  3288. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3289. assert((nrc == 2) || (nrc == 1));
  3290. #else
  3291. assert(nrc == 1);
  3292. #endif
  3293. UNUSED(nrc);
  3294. UNUSED(bx);
  3295. UNUSED(by);
  3296. UNUSED(bs);
  3297. const block_q4_1 * restrict x = vx;
  3298. const block_q8_1 * restrict y = vy;
  3299. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3300. if (nrc == 2) {
  3301. const block_q4_1 * restrict vx0 = vx;
  3302. const block_q4_1 * restrict vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx);
  3303. const block_q8_1 * restrict vy0 = vy;
  3304. const block_q8_1 * restrict vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by);
  3305. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3306. float32x4_t summs0 = vdupq_n_f32(0.0f);
  3307. for (int i = 0; i < nb; i++) {
  3308. const block_q4_1 * restrict b_x0 = &vx0[i];
  3309. const block_q4_1 * restrict b_x1 = &vx1[i];
  3310. const block_q8_1 * restrict b_y0 = &vy0[i];
  3311. const block_q8_1 * restrict b_y1 = &vy1[i];
  3312. float32_t summs_t[4] = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s),
  3313. GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s),
  3314. GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s),
  3315. GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)};
  3316. summs0 = vaddq_f32(summs0, vld1q_f32(summs_t));
  3317. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3318. const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
  3319. const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
  3320. // 4-bit -> 8-bit
  3321. const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3322. const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3323. const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3324. const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3325. // load y
  3326. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  3327. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  3328. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  3329. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  3330. // mmla into int32x4_t
float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  3335. float32x4_t scale = vld1q_f32(_scale);
  3336. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3337. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  3338. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3339. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  3340. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3341. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  3342. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3343. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  3344. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  3345. l1, r1)), l2, r2)), l3, r3))), scale);
  3346. }
  3347. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  3348. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  3349. sumv2 = vaddq_f32(sumv2, summs0);
  3350. vst1_f32(s, vget_low_f32(sumv2));
  3351. vst1_f32(s + bs, vget_high_f32(sumv2));
  3352. return;
  3353. }
  3354. #endif
  3355. // TODO: add WASM SIMD
  3356. #if defined(__ARM_NEON)
  3357. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3358. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3359. float summs = 0;
  3360. assert(nb % 2 == 0); // TODO: handle odd nb
  3361. for (int i = 0; i < nb; i += 2) {
  3362. const block_q4_1 * restrict x0 = &x[i + 0];
  3363. const block_q4_1 * restrict x1 = &x[i + 1];
  3364. const block_q8_1 * restrict y0 = &y[i + 0];
  3365. const block_q8_1 * restrict y1 = &y[i + 1];
  3366. summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s) + GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s);
  3367. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3368. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3369. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3370. // 4-bit -> 8-bit
  3371. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3372. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3373. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3374. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3375. // load y
  3376. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3377. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3378. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3379. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3380. // dot product into int32x4_t
  3381. const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
  3382. const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
  3383. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3384. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3385. }
  3386. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
  3387. #elif defined(__AVX2__) || defined(__AVX__)
  3388. // Initialize accumulator with zeros
  3389. __m256 acc = _mm256_setzero_ps();
  3390. float summs = 0;
  3391. // Main loop
  3392. for (int i = 0; i < nb; ++i) {
  3393. const float d0 = GGML_FP16_TO_FP32(x[i].d);
  3394. const float d1 = GGML_FP16_TO_FP32(y[i].d);
  3395. summs += GGML_FP16_TO_FP32(x[i].m) * GGML_FP16_TO_FP32(y[i].s);
  3396. const __m256 d0v = _mm256_set1_ps( d0 );
  3397. const __m256 d1v = _mm256_set1_ps( d1 );
  3398. // Compute combined scales
  3399. const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
  3400. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  3401. const __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3402. const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[i].qs );
  3403. const __m256 xy = mul_sum_us8_pairs_float(qx, qy);
  3404. // Accumulate d0*d1*x*y
  3405. #if defined(__AVX2__)
  3406. acc = _mm256_fmadd_ps( d0d1, xy, acc );
  3407. #else
  3408. acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
  3409. #endif
  3410. }
  3411. *s = hsum_float_8(acc) + summs;
  3412. #elif defined(__riscv_v_intrinsic)
  3413. float sumf = 0.0;
  3414. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3415. for (int i = 0; i < nb; i++) {
  3416. // load elements
  3417. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3418. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3419. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3420. // mask and store lower part of x, and then upper part
  3421. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3422. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3423. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3424. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3425. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3426. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3427. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3428. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3429. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3430. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3431. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3432. }
  3433. *s = sumf;
  3434. #elif defined(__POWER9_VECTOR__)
  3435. const vector signed char lowMask = vec_splats((signed char)0xF);
  3436. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  3437. vector float vsumf0 = vec_splats(0.0f);
  3438. #pragma GCC unroll 4
  3439. for (int i = 0; i < nb; i++) {
  3440. __builtin_prefetch(x[i].qs, 0, 1);
  3441. __builtin_prefetch(y[i].qs, 0, 1);
  3442. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  3443. vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d));
  3444. vector float vd = vec_mul(vxd, vyd);
  3445. vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].m));
  3446. vector float vys = {GGML_FP16_TO_FP32(y[i].s), 0.0f, 0.0f, 0.0f};
  3447. vsumf0 = vec_madd(vxmin, vys, vsumf0);
  3448. vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs);
  3449. vector signed char q8y0 = vec_xl( 0, y[i].qs);
  3450. vector signed char q8y1 = vec_xl(16, y[i].qs);
  3451. vector signed char q4x0 = vec_and(qxs, lowMask);
  3452. vector signed char q4x1 = vec_sr(qxs, v4);
  3453. vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
  3454. vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
  3455. qv0 = vec_add(qv0, qv1);
  3456. vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
  3457. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  3458. }
  3459. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  3460. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  3461. *s = vec_extract(vsumf0, 0);
  3462. #else
  3463. // scalar
  3464. float sumf = 0.0;
  3465. for (int i = 0; i < nb; i++) {
  3466. int sumi = 0;
  3467. for (int j = 0; j < qk/2; ++j) {
  3468. const int v0 = (x[i].qs[j] & 0x0F);
  3469. const int v1 = (x[i].qs[j] >> 4);
  3470. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  3471. }
  3472. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3473. }
  3474. *s = sumf;
  3475. #endif
  3476. }
  3477. void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3478. const int qk = QK8_0;
  3479. const int nb = n / qk;
  3480. assert(n % qk == 0);
  3481. assert(qk == QK5_0);
  3482. assert(nrc == 1);
  3483. UNUSED(nrc);
  3484. UNUSED(bx);
  3485. UNUSED(by);
  3486. UNUSED(bs);
  3487. const block_q5_0 * restrict x = vx;
  3488. const block_q8_0 * restrict y = vy;
  3489. #if defined(__ARM_NEON)
  3490. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3491. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3492. uint32_t qh0;
  3493. uint32_t qh1;
  3494. uint64_t tmp0[4];
  3495. uint64_t tmp1[4];
  3496. assert(nb % 2 == 0); // TODO: handle odd nb
  3497. for (int i = 0; i < nb; i += 2) {
  3498. const block_q5_0 * restrict x0 = &x[i];
  3499. const block_q5_0 * restrict x1 = &x[i + 1];
  3500. const block_q8_0 * restrict y0 = &y[i];
  3501. const block_q8_0 * restrict y1 = &y[i + 1];
  3502. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3503. // extract the 5th bit via lookup table ((!b) << 4)
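// table_b2b_1[b] expands the 8 bits of b into 8 bytes that are 0x10 where the bit is 0 and
// 0x00 where it is 1, so the vsubq_s8 below both inserts the 5th bit and applies the -16
// offset of the Q5_0 encoding in a single operation.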
  3504. memcpy(&qh0, x0->qh, sizeof(qh0));
  3505. memcpy(&qh1, x1->qh, sizeof(qh1));
  3506. tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
  3507. tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
  3508. tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
  3509. tmp0[3] = table_b2b_1[(qh0 >> 24) ];
  3510. tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
  3511. tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
  3512. tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
  3513. tmp1[3] = table_b2b_1[(qh1 >> 24) ];
  3514. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  3515. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  3516. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  3517. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  3518. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3519. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3520. // 4-bit -> 8-bit
  3521. int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3522. int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3523. int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3524. int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3525. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  3526. const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
  3527. const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
  3528. const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
  3529. const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
  3530. // load y
  3531. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3532. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3533. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3534. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3535. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3536. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  3537. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3538. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3539. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  3540. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3541. }
  3542. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  3543. #elif defined(__wasm_simd128__)
  3544. v128_t sumv = wasm_f32x4_splat(0.0f);
  3545. uint32_t qh;
  3546. uint64_t tmp[4];
  3547. // TODO: check if unrolling this is better
  3548. for (int i = 0; i < nb; ++i) {
  3549. const block_q5_0 * restrict x0 = &x[i];
  3550. const block_q8_0 * restrict y0 = &y[i];
  3551. const v128_t m4b = wasm_i8x16_splat(0x0F);
  3552. // extract the 5th bit
  3553. memcpy(&qh, x0->qh, sizeof(qh));
  3554. tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
  3555. tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
  3556. tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
  3557. tmp[3] = table_b2b_1[(qh >> 24) ];
  3558. const v128_t qhl = wasm_v128_load(tmp + 0);
  3559. const v128_t qhh = wasm_v128_load(tmp + 2);
  3560. const v128_t v0 = wasm_v128_load(x0->qs);
  3561. // 4-bit -> 8-bit
  3562. const v128_t v0l = wasm_v128_and (v0, m4b);
  3563. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  3564. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  3565. const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
  3566. const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
  3567. // load y
  3568. const v128_t v1l = wasm_v128_load(y0->qs);
  3569. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  3570. // int8x16 -> int16x8
  3571. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  3572. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  3573. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  3574. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  3575. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  3576. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  3577. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  3578. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  3579. // dot product
  3580. sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
  3581. wasm_i32x4_add(
  3582. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  3583. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  3584. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  3585. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  3586. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  3587. }
  3588. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  3589. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
  3590. #elif defined(__AVX2__)
  3591. // Initialize accumulator with zeros
  3592. __m256 acc = _mm256_setzero_ps();
  3593. // Main loop
  3594. for (int i = 0; i < nb; i++) {
  3595. /* Compute combined scale for the block */
  3596. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3597. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3598. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3599. bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
  3600. qx = _mm256_or_si256(qx, bxhi);
  3601. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3602. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  3603. /* Multiply q with scale and accumulate */
  3604. acc = _mm256_fmadd_ps(d, q, acc);
  3605. }
  3606. *s = hsum_float_8(acc);
  3607. #elif defined(__AVX__)
  3608. // Initialize accumulator with zeros
  3609. __m256 acc = _mm256_setzero_ps();
  3610. __m128i mask = _mm_set1_epi8((char)0xF0);
  3611. // Main loop
  3612. for (int i = 0; i < nb; i++) {
  3613. /* Compute combined scale for the block */
  3614. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  3615. __m256i bx_0 = bytes_from_nibbles_32(x[i].qs);
  3616. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3617. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  3618. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  3619. bxhil = _mm_andnot_si128(bxhil, mask);
  3620. bxhih = _mm_andnot_si128(bxhih, mask);
  3621. __m128i bxl = _mm256_castsi256_si128(bx_0);
  3622. __m128i bxh = _mm256_extractf128_si256(bx_0, 1);
  3623. bxl = _mm_or_si128(bxl, bxhil);
  3624. bxh = _mm_or_si128(bxh, bxhih);
  3625. bx_0 = MM256_SET_M128I(bxh, bxl);
  3626. const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3627. const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0);
  3628. /* Multiply q with scale and accumulate */
  3629. acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
  3630. }
  3631. *s = hsum_float_8(acc);
  3632. #elif defined(__riscv_v_intrinsic)
  3633. float sumf = 0.0;
  3634. uint32_t qh;
  3635. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3636. // These temporary registers are for masking and shift operations
  3637. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  3638. vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
  3639. vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
  3640. vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  3641. for (int i = 0; i < nb; i++) {
  3642. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  3643. // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  3644. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
  3645. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
  3646. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  3647. // ((qh & (1u << (j + 16))) >> (j + 12));
  3648. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
  3649. vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
  3650. // narrowing
  3651. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
  3652. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  3653. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
  3654. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  3655. // load
  3656. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3657. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3658. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3659. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3660. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3661. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  3662. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  3663. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3664. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3665. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
  3666. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
  3667. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3668. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3669. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3670. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3671. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3672. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3673. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  3674. }
  3675. *s = sumf;
  3676. #elif defined(__POWER9_VECTOR__)
  3677. const vector signed char lowMask = vec_splats((signed char)0xF);
  3678. const vector unsigned char v4 = vec_splats((unsigned char)4);
  3679. vector float vsumf0 = vec_splats(0.0f);
  3680. #pragma GCC unroll 4
  3681. for (int i = 0; i < nb; ++i) {
  3682. __builtin_prefetch(x[i].qs, 0, 1);
  3683. __builtin_prefetch(y[i].qs, 0, 1);
  3684. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  3685. vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d));
  3686. vector float vd = vec_mul(vxd, vyd);
  3687. vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[i].qh[0]]), (uint64_t)(table_b2b_1[x[i].qh[1]])};
  3688. vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[i].qh[2]]), (uint64_t)(table_b2b_1[x[i].qh[3]])};
  3689. vector signed char qh0 = (vector signed char)aux64x2_0;
  3690. vector signed char qh1 = (vector signed char)aux64x2_1;
  3691. vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs);
  3692. vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0);
  3693. vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1);
  3694. vector signed char q8y0 = vec_xl( 0, y[i].qs);
  3695. vector signed char q8y1 = vec_xl( 16, y[i].qs);
  3696. vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0));
  3697. vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1));
  3698. qv0 = vec_add(qv0, qv1);
  3699. vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
  3700. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  3701. }
  3702. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  3703. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  3704. *s = vec_extract(vsumf0, 0);
  3705. #else
  3706. // scalar
  3707. float sumf = 0.0;
  3708. for (int i = 0; i < nb; i++) {
  3709. uint32_t qh;
  3710. memcpy(&qh, x[i].qh, sizeof(qh));
  3711. int sumi = 0;
  3712. for (int j = 0; j < qk/2; ++j) {
  3713. const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  3714. const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
  3715. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  3716. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  3717. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  3718. }
  3719. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  3720. }
  3721. *s = sumf;
  3722. #endif
  3723. }
  3724. void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3725. const int qk = QK8_1;
  3726. const int nb = n / qk;
  3727. assert(n % qk == 0);
  3728. assert(qk == QK5_1);
  3729. assert(nrc == 1);
  3730. UNUSED(nrc);
  3731. UNUSED(bx);
  3732. UNUSED(by);
  3733. UNUSED(bs);
  3734. const block_q5_1 * restrict x = vx;
  3735. const block_q8_1 * restrict y = vy;
  3736. #if defined(__ARM_NEON)
  3737. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  3738. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  3739. float summs0 = 0.0f;
  3740. float summs1 = 0.0f;
  3741. uint32_t qh0;
  3742. uint32_t qh1;
  3743. uint64_t tmp0[4];
  3744. uint64_t tmp1[4];
  3745. assert(nb % 2 == 0); // TODO: handle odd nb
  3746. for (int i = 0; i < nb; i += 2) {
  3747. const block_q5_1 * restrict x0 = &x[i];
  3748. const block_q5_1 * restrict x1 = &x[i + 1];
  3749. const block_q8_1 * restrict y0 = &y[i];
  3750. const block_q8_1 * restrict y1 = &y[i + 1];
  3751. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  3752. summs0 += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s);
  3753. summs1 += GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s);
  3754. // extract the 5th bit via lookup table ((b) << 4)
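// table_b2b_0[b] expands the 8 bits of b into 8 bytes equal to bit << 4; they are OR-ed into
// the low nibbles below because Q5_1 quants stay unsigned, with the block minimum m (folded
// into summs via y->s) handling the offset instead.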
  3755. memcpy(&qh0, x0->qh, sizeof(qh0));
  3756. memcpy(&qh1, x1->qh, sizeof(qh1));
  3757. tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
  3758. tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
  3759. tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
  3760. tmp0[3] = table_b2b_0[(qh0 >> 24) ];
  3761. tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
  3762. tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
  3763. tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
  3764. tmp1[3] = table_b2b_0[(qh1 >> 24) ];
  3765. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  3766. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  3767. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  3768. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  3769. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  3770. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  3771. // 4-bit -> 8-bit
  3772. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  3773. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  3774. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  3775. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  3776. // add high bit
  3777. const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
  3778. const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
  3779. const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
  3780. const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
  3781. // load y
  3782. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  3783. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  3784. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  3785. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  3786. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  3787. ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  3788. ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  3789. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  3790. ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  3791. ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  3792. }
  3793. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
  3794. #elif defined(__wasm_simd128__)
  3795. v128_t sumv = wasm_f32x4_splat(0.0f);
  3796. float summs = 0.0f;
  3797. uint32_t qh;
  3798. uint64_t tmp[4];
  3799. // TODO: check if unrolling this is better
  3800. for (int i = 0; i < nb; ++i) {
  3801. const block_q5_1 * restrict x0 = &x[i];
  3802. const block_q8_1 * restrict y0 = &y[i];
  3803. summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s);
  3804. const v128_t m4b = wasm_i8x16_splat(0x0F);
  3805. // extract the 5th bit
  3806. memcpy(&qh, x0->qh, sizeof(qh));
  3807. tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
  3808. tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
  3809. tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
  3810. tmp[3] = table_b2b_0[(qh >> 24) ];
  3811. const v128_t qhl = wasm_v128_load(tmp + 0);
  3812. const v128_t qhh = wasm_v128_load(tmp + 2);
  3813. const v128_t v0 = wasm_v128_load(x0->qs);
  3814. // 4-bit -> 8-bit
  3815. const v128_t v0l = wasm_v128_and (v0, m4b);
  3816. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  3817. // add high bit
  3818. const v128_t v0lf = wasm_v128_or(v0l, qhl);
  3819. const v128_t v0hf = wasm_v128_or(v0h, qhh);
  3820. // load y
  3821. const v128_t v1l = wasm_v128_load(y0->qs);
  3822. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  3823. // int8x16 -> int16x8
  3824. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  3825. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  3826. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  3827. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  3828. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  3829. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  3830. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  3831. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  3832. // dot product
  3833. sumv = wasm_f32x4_add(sumv,
  3834. wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
  3835. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  3836. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  3837. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  3838. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  3839. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  3840. }
  3841. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  3842. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
  3843. #elif defined(__AVX2__)
  3844. // Initialize accumulator with zeros
  3845. __m256 acc = _mm256_setzero_ps();
  3846. float summs = 0.0f;
  3847. // Main loop
  3848. for (int i = 0; i < nb; i++) {
  3849. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  3850. summs += GGML_FP16_TO_FP32(x[i].m) * GGML_FP16_TO_FP32(y[i].s);
  3851. __m256i qx = bytes_from_nibbles_32(x[i].qs);
  3852. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3853. bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
  3854. qx = _mm256_or_si256(qx, bxhi);
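// bytes_from_bits_32 turns each of the 32 qh bits into a full byte mask; masking with
// 0x10 places the high bit at position 4 and the OR merges it into the low nibbles,
// giving unsigned quants in [0, 31] (q5_1 carries a separate min m, so no re-centering).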
  3855. const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[i].d));
  3856. const __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3857. const __m256 q = mul_sum_us8_pairs_float(qx, qy);
  3858. acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
  3859. }
  3860. *s = hsum_float_8(acc) + summs;
  3861. #elif defined(__AVX__)
  3862. // Initialize accumulator with zeros
  3863. __m256 acc = _mm256_setzero_ps();
  3864. __m128i mask = _mm_set1_epi8(0x10);
  3865. float summs = 0.0f;
  3866. // Main loop
  3867. for (int i = 0; i < nb; i++) {
  3868. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  3869. summs += GGML_FP16_TO_FP32(x[i].m) * GGML_FP16_TO_FP32(y[i].s);
  3870. __m256i bx_0 = bytes_from_nibbles_32(x[i].qs);
  3871. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  3872. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  3873. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  3874. bxhil = _mm_and_si128(bxhil, mask);
  3875. bxhih = _mm_and_si128(bxhih, mask);
  3876. __m128i bxl = _mm256_castsi256_si128(bx_0);
  3877. __m128i bxh = _mm256_extractf128_si256(bx_0, 1);
  3878. bxl = _mm_or_si128(bxl, bxhil);
  3879. bxh = _mm_or_si128(bxh, bxhih);
  3880. bx_0 = MM256_SET_M128I(bxh, bxl);
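// without AVX2 there is no 256-bit integer OR, so the merge of the high bits is done
// on the two 128-bit halves and the result is reassembled afterwards.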
  3881. const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[i].d));
  3882. const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  3883. const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0);
  3884. acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
  3885. }
  3886. *s = hsum_float_8(acc) + summs;
  3887. #elif defined(__riscv_v_intrinsic)
  3888. float sumf = 0.0;
  3889. uint32_t qh;
  3890. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  3891. // temporary registers for shift operations
  3892. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  3893. vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
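// vt_1 = {0, 1, ..., 15} provides a per-lane shift amount so that lane j extracts bit j
// of qh; vt_2 = vt_1 + 12 targets bits 16..31, mirroring the scalar
// ((qh >> (j + 12)) & 0x10) expression.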
  3894. for (int i = 0; i < nb; i++) {
  3895. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  3896. // load qh
  3897. vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
  3898. // ((qh >> (j + 0)) << 4) & 0x10;
  3899. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
  3900. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  3901. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
  3902. // ((qh >> (j + 12)) ) & 0x10;
  3903. vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
  3904. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
  3905. // narrowing
  3906. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
  3907. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  3908. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
  3909. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  3910. // load
  3911. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  3912. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  3913. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  3914. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  3915. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  3916. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  3917. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  3918. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  3919. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  3920. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  3921. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  3922. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  3923. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  3924. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  3925. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  3926. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3927. }
  3928. *s = sumf;
  3929. #elif defined(__POWER9_VECTOR__)
  3930. const vector signed char lowMask = vec_splats((signed char)0xF);
  3931. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  3932. vector float vsumf0 = vec_splats(0.0f);
  3933. #pragma GCC unroll 4
  3934. for (int i = 0; i < nb; ++i) {
  3935. __builtin_prefetch(x[i].qs, 0, 1);
  3936. __builtin_prefetch(y[i].qs, 0, 1);
  3937. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  3938. vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d));
  3939. vector float vd = vec_mul(vxd, vyd);
  3940. vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].m));
  3941. vector float vys = {GGML_FP16_TO_FP32(y[i].s), 0.f, 0.f, 0.f};
  3942. vsumf0 = vec_madd(vxmin, vys, vsumf0);
  3943. vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[i].qh[0]]), (uint64_t)(table_b2b_0[x[i].qh[1]])};
  3944. vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[i].qh[2]]), (uint64_t)(table_b2b_0[x[i].qh[3]])};
  3945. vector signed char qh0 = (vector signed char)aux64x2_0;
  3946. vector signed char qh1 = (vector signed char)aux64x2_1;
  3947. vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs);
  3948. vector signed char q5x0 = vec_or(vec_and(qxs, lowMask), qh0);
  3949. vector signed char q5x1 = vec_or(vec_sr(qxs, v4), qh1);
  3950. vector signed char q8y0 = vec_xl( 0, y[i].qs);
  3951. vector signed char q8y1 = vec_xl( 16, y[i].qs);
  3952. vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0));
  3953. vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1));
  3954. qv0 = vec_add(qv0, qv1);
  3955. vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
  3956. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  3957. }
  3958. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  3959. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  3960. *s = vec_extract(vsumf0, 0);
  3961. #else
  3962. // scalar
  3963. float sumf = 0.0;
  3964. for (int i = 0; i < nb; i++) {
  3965. uint32_t qh;
  3966. memcpy(&qh, x[i].qh, sizeof(qh));
  3967. int sumi = 0;
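// bit j of qh is the 5th bit of the low-nibble quant and bit j+16 the 5th bit of the
// high-nibble quant; e.g. for j = 0 and qh = 0x00010001, xh_0 = (qh << 4) & 0x10 = 0x10
// and xh_1 = (qh >> 12) & 0x10 = 0x10, so both quants get their 5th bit set.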
  3968. for (int j = 0; j < qk/2; ++j) {
  3969. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  3970. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  3971. const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
  3972. const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
  3973. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  3974. }
  3975. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d))*sumi + GGML_FP16_TO_FP32(x[i].m)*GGML_FP16_TO_FP32(y[i].s);
  3976. }
  3977. *s = sumf;
  3978. #endif
  3979. }
  3980. void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  3981. const int qk = QK8_0;
  3982. const int nb = n / qk;
  3983. assert(n % qk == 0);
  3984. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3985. assert((nrc == 2) || (nrc == 1));
  3986. #else
  3987. assert(nrc == 1);
  3988. #endif
  3989. UNUSED(nrc);
  3990. UNUSED(bx);
  3991. UNUSED(by);
  3992. UNUSED(bs);
  3993. const block_q8_0 * restrict x = vx;
  3994. const block_q8_0 * restrict y = vy;
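// q8_0 x q8_0: each block holds 32 int8 quants and an fp16 scale, so the result is
// the sum over blocks of d_x * d_y * sum_j(x.qs[j] * y.qs[j]).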
  3995. #if defined(__ARM_FEATURE_MATMUL_INT8)
  3996. if (nrc == 2) {
  3997. const block_q8_0 * restrict vx0 = vx;
  3998. const block_q8_0 * restrict vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx);
  3999. const block_q8_0 * restrict vy0 = vy;
  4000. const block_q8_0 * restrict vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by);
  4001. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4002. for (int i = 0; i < nb; i++) {
  4003. const block_q8_0 * restrict b_x0 = &vx0[i];
  4004. const block_q8_0 * restrict b_y0 = &vy0[i];
  4005. const block_q8_0 * restrict b_x1 = &vx1[i];
  4006. const block_q8_0 * restrict b_y1 = &vy1[i];
  4007. const int8x16_t x0_l = vld1q_s8(b_x0->qs);
  4008. const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
  4009. const int8x16_t x1_l = vld1q_s8(b_x1->qs);
  4010. const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);
  4011. // load y
  4012. const int8x16_t y0_l = vld1q_s8(b_y0->qs);
  4013. const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
  4014. const int8x16_t y1_l = vld1q_s8(b_y1->qs);
  4015. const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
  4016. float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
  4017. GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
  4018. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d),
  4019. GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)};
  4020. float32x4_t scale = vld1q_f32(_scale);
  4021. int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4022. int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
  4023. int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4024. int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
  4025. int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4026. int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
  4027. int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
  4028. int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
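// the zips pair the 64-bit halves of the two x rows and the two y rows so that each
// vmmlaq_s32 (SMMLA) accumulates a 2x2 int32 tile; the four lanes end up holding
// x0.y0, x0.y1, x1.y0 and x1.y1, matching the order of the scales in _scale.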
  4029. sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
  4030. l1, r1)), l2, r2)), l3, r3))), scale);
  4031. }
  4032. float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2);
  4033. float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
  4034. vst1_f32(s, vget_low_f32(sumv2));
  4035. vst1_f32(s + bs, vget_high_f32(sumv2));
  4036. return;
  4037. }
  4038. #endif
  4039. #if defined(__ARM_NEON)
  4040. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  4041. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  4042. assert(nb % 2 == 0); // TODO: handle odd nb
  4043. for (int i = 0; i < nb; i += 2) {
  4044. const block_q8_0 * restrict x0 = &x[i + 0];
  4045. const block_q8_0 * restrict x1 = &x[i + 1];
  4046. const block_q8_0 * restrict y0 = &y[i + 0];
  4047. const block_q8_0 * restrict y1 = &y[i + 1];
  4048. const int8x16_t x0_0 = vld1q_s8(x0->qs);
  4049. const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
  4050. const int8x16_t x1_0 = vld1q_s8(x1->qs);
  4051. const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
  4052. // load y
  4053. const int8x16_t y0_0 = vld1q_s8(y0->qs);
  4054. const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
  4055. const int8x16_t y1_0 = vld1q_s8(y1->qs);
  4056. const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
  4057. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  4058. ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
  4059. ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  4060. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  4061. ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
  4062. ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  4063. }
  4064. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  4065. #elif defined(__AVX2__) || defined(__AVX__)
  4066. // Initialize accumulator with zeros
  4067. __m256 acc = _mm256_setzero_ps();
  4068. // Main loop
  4069. for (int i = 0; i < nb; ++i) {
  4070. // Compute combined scale for the block
  4071. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  4072. __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs);
  4073. __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
  4074. const __m256 q = mul_sum_i8_pairs_float(qx, qy);
  4075. // Multiply q with scale and accumulate
  4076. #if defined(__AVX2__)
  4077. acc = _mm256_fmadd_ps( d, q, acc );
  4078. #else
  4079. acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
  4080. #endif
  4081. }
  4082. *s = hsum_float_8(acc);
  4083. #elif defined(__riscv_v_intrinsic)
  4084. float sumf = 0.0;
  4085. size_t vl = __riscv_vsetvl_e8m1(qk);
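// widening multiply of the two int8 vectors to int16, followed by a widening reduction
// to a single int32 sum, which is then scaled by d_x * d_y.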
  4086. for (int i = 0; i < nb; i++) {
  4087. // load elements
  4088. vint8m1_t bx_0 = __riscv_vle8_v_i8m1(x[i].qs, vl);
  4089. vint8m1_t by_0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  4090. vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx_0, by_0, vl);
  4091. vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
  4092. vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
  4093. int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
  4094. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  4095. }
  4096. *s = sumf;
  4097. #elif defined(__POWER9_VECTOR__)
  4098. vector float vsumf0 = vec_splats(0.0f);
  4099. #pragma GCC unroll 4
  4100. for (int i = 0; i < nb; i++) {
  4101. __builtin_prefetch(x[i].qs, 0, 1);
  4102. __builtin_prefetch(y[i].qs, 0, 1);
  4103. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  4104. vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d));
  4105. vector float vd = vec_mul(vxd, vyd);
  4106. vector signed char q8x0 = vec_xl( 0, x[i].qs);
  4107. vector signed char q8x1 = vec_xl(16, x[i].qs);
  4108. vector signed char q8y0 = vec_xl( 0, y[i].qs);
  4109. vector signed char q8y1 = vec_xl(16, y[i].qs);
  4110. vector signed short qv0 = vec_mule(q8x0, q8y0);
  4111. vector signed short qv1 = vec_mulo(q8x0, q8y0);
  4112. vector signed short qv2 = vec_mule(q8x1, q8y1);
  4113. vector signed short qv3 = vec_mulo(q8x1, q8y1);
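// vec_mule/vec_mulo multiply the even/odd int8 lanes into int16; the unpackh/unpackl
// below widen those to int32 so all 32 products of the block are accumulated exactly.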
  4114. vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackh(qv1));
  4115. vector signed int vsumi1 = vec_add(vec_unpackl(qv0), vec_unpackl(qv1));
  4116. vector signed int vsumi2 = vec_add(vec_unpackh(qv2), vec_unpackh(qv3));
  4117. vector signed int vsumi3 = vec_add(vec_unpackl(qv2), vec_unpackl(qv3));
  4118. vsumi0 = vec_add(vsumi0, vsumi2);
  4119. vsumi1 = vec_add(vsumi1, vsumi3);
  4120. vsumi0 = vec_add(vsumi0, vsumi1);
  4121. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  4122. }
  4123. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  4124. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  4125. *s = vec_extract(vsumf0, 0);
  4126. #else
  4127. // scalar
  4128. float sumf = 0.0;
  4129. for (int i = 0; i < nb; i++) {
  4130. int sumi = 0;
  4131. for (int j = 0; j < qk; j++) {
  4132. sumi += x[i].qs[j]*y[i].qs[j];
  4133. }
  4134. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  4135. }
  4136. *s = sumf;
  4137. #endif
  4138. }
  4139. #if QK_K == 256
  4140. void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4141. assert(nrc == 1);
  4142. UNUSED(nrc);
  4143. UNUSED(bx);
  4144. UNUSED(by);
  4145. UNUSED(bs);
  4146. const block_q2_K * restrict x = vx;
  4147. const block_q8_K * restrict y = vy;
  4148. const int nb = n / QK_K;
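// q2_K: a 256-quant super-block holds 16 sub-blocks of 16 2-bit quants; scales[k] packs
// a 4-bit sub-block scale in the low nibble and a 4-bit min in the high nibble, so the
// result is d * sum_k(scale_k * (q2 . q8)) - dmin * sum_k(min_k * bsums[k]).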
  4149. #ifdef __ARM_NEON
  4150. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4151. const uint8x16_t m4 = vdupq_n_u8(0xF);
  4152. const int32x4_t vzero = vdupq_n_s32(0);
  4153. ggml_int8x16x2_t q2bytes;
  4154. uint8_t aux[16];
  4155. float sum = 0;
  4156. for (int i = 0; i < nb; ++i) {
  4157. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4158. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4159. const uint8_t * restrict q2 = x[i].qs;
  4160. const int8_t * restrict q8 = y[i].qs;
  4161. const uint8_t * restrict sc = x[i].scales;
  4162. const uint8x16_t mins_and_scales = vld1q_u8(sc);
  4163. const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
  4164. vst1q_u8(aux, scales);
  4165. const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
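// multiply the 16 sub-block mins with the 16 per-sub-block q8 sums (bsums) and fold
// them into the correction term; dmin already carries the negative sign.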
  4166. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  4167. const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
  4168. const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
  4169. vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
  4170. const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
  4171. vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
  4172. sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
  4173. int isum = 0;
  4174. int is = 0;
4175. // We use this macro instead of a function call because, for some reason, the code
4176. // runs 2-3% slower with a function call, even if the function is declared inline
  4177. #define MULTIPLY_ACCUM_WITH_SCALE(index)\
  4178. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
  4179. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
  4180. #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
  4181. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
  4182. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
  4183. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
  4184. MULTIPLY_ACCUM_WITH_SCALE((index));
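// each iteration covers 128 quants: the same 32 bytes of q2 are shifted by 0/2/4/6 to
// expose four planes of 2-bit values, and every plane is dotted with 32 fresh q8 values.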
  4185. for (int j = 0; j < QK_K/128; ++j) {
  4186. const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
  4187. ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  4188. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
  4189. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
  4190. MULTIPLY_ACCUM_WITH_SCALE(0);
  4191. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
  4192. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
  4193. SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
  4194. is += 8;
  4195. }
  4196. sum += d * isum;
  4197. }
  4198. *s = sum;
  4199. #elif defined __AVX2__
  4200. const __m256i m3 = _mm256_set1_epi8(3);
  4201. const __m128i m4 = _mm_set1_epi8(0xF);
  4202. __m256 acc = _mm256_setzero_ps();
  4203. for (int i = 0; i < nb; ++i) {
  4204. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4205. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4206. const uint8_t * restrict q2 = x[i].qs;
  4207. const int8_t * restrict q8 = y[i].qs;
  4208. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4209. const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
  4210. const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4211. const __m256i mins = _mm256_cvtepi8_epi16(mins8);
  4212. const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
  4213. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
  4214. const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
  4215. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4216. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4217. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
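// the 16 sub-block scales are widened to 16 bits and duplicated into two 256-bit
// vectors; inside the loop the shuffle (via get_scale_shuffle_q3k) is meant to broadcast
// the pair of scales that belongs to each group of 32 quants.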
  4218. __m256i sumi = _mm256_setzero_si256();
  4219. for (int j = 0; j < QK_K/128; ++j) {
  4220. const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
  4221. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4222. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4223. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4224. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4225. const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
  4226. const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
  4227. const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
  4228. const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
  4229. __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4230. __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4231. __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
  4232. __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
  4233. p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
  4234. p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
  4235. p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
  4236. p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
  4237. p0 = _mm256_add_epi32(p0, p1);
  4238. p2 = _mm256_add_epi32(p2, p3);
  4239. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
  4240. }
  4241. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4242. }
  4243. *s = hsum_float_8(acc);
  4244. #elif defined __AVX__
  4245. const __m128i m3 = _mm_set1_epi8(0x3);
  4246. const __m128i m4 = _mm_set1_epi8(0xF);
  4247. const __m128i m2 = _mm_set1_epi8(0x2);
  4248. __m256 acc = _mm256_setzero_ps();
  4249. for (int i = 0; i < nb; ++i) {
  4250. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4251. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4252. const uint8_t * restrict q2 = x[i].qs;
  4253. const int8_t * restrict q8 = y[i].qs;
  4254. // load mins and scales from block_q2_K.scales[QK_K/16]
  4255. const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  4256. const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
  4257. const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
  4258. const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
  4259. const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
  4260. // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
  4261. const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
  4262. const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
  4263. // sumf += -dmin * summs in 32bits*8
  4264. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
  4265. const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
  4266. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
  4267. const __m128i scales[2] = { scales_0, scales_1 };
  4268. __m128i sumi_0 = _mm_setzero_si128();
  4269. __m128i sumi_1 = _mm_setzero_si128();
  4270. for (int j = 0; j < QK_K/128; ++j) {
  4271. // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
  4272. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4273. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4274. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4275. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4276. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4277. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4278. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4279. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4280. // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
  4281. __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4282. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4283. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4284. const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4285. const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4286. q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
  4287. const __m128i q2_1 = _mm_and_si128(q2bits, m3);
  4288. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4289. const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4290. const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4291. // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
  4292. __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
  4293. __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
  4294. __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
  4295. __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
  4296. __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
  4297. __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
  4298. __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
  4299. __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
  4300. // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
  4301. __m128i shuffle = _mm_set1_epi16(0x0100);
  4302. p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
  4303. shuffle = _mm_add_epi16(shuffle, m2);
  4304. p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
  4305. shuffle = _mm_add_epi16(shuffle, m2);
  4306. p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
  4307. shuffle = _mm_add_epi16(shuffle, m2);
  4308. p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
  4309. shuffle = _mm_add_epi16(shuffle, m2);
  4310. p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
  4311. shuffle = _mm_add_epi16(shuffle, m2);
  4312. p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
  4313. shuffle = _mm_add_epi16(shuffle, m2);
  4314. p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
  4315. shuffle = _mm_add_epi16(shuffle, m2);
  4316. p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
  4317. p0 = _mm_add_epi32(p0, p1);
  4318. p2 = _mm_add_epi32(p2, p3);
  4319. p4 = _mm_add_epi32(p4, p5);
  4320. p6 = _mm_add_epi32(p6, p7);
  4321. // isum in 32bits*4*2
  4322. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
  4323. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
  4324. }
  4325. // sumf += dall * isum - dmin * summs in 32bits
  4326. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  4327. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
  4328. }
  4329. *s = hsum_float_8(acc);
  4330. #elif defined __riscv_v_intrinsic
  4331. float sumf = 0;
  4332. uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  4333. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
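// temp_01 is a gather index pattern: the first 16 lanes select scale is+k and the next
// 16 lanes scale is+k+1, duplicating each sub-block scale across its 16 quants.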
  4334. for (int i = 0; i < nb; ++i) {
  4335. const uint8_t * q2 = x[i].qs;
  4336. const int8_t * q8 = y[i].qs;
  4337. const uint8_t * sc = x[i].scales;
  4338. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4339. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4340. size_t vl = 16;
  4341. vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
  4342. vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
  4343. vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
  4344. vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
  4345. vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
  4346. vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
  4347. vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
  4348. vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  4349. sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
  4350. vl = 32;
  4351. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  4352. vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
  4353. uint8_t is=0;
  4354. int isum=0;
  4355. for (int j = 0; j < QK_K/128; ++j) {
  4356. // load Q2
  4357. vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
  4358. vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
  4359. vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
  4360. vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
  4361. vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
  4362. // duplicate scale elements for product
  4363. vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
  4364. vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
  4365. vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
  4366. vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
  4367. vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
  4368. vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
  4369. vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
  4370. vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
  4371. // load Q8
  4372. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  4373. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  4374. vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
  4375. vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
  4376. vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
  4377. vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
  4378. vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
  4379. vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
  4380. vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
  4381. vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
  4382. isum += __riscv_vmv_x_s_i32m1_i32(isum1);
  4383. q2+=32; q8+=128; is=8;
  4384. }
  4385. sumf += dall * isum;
  4386. }
  4387. *s = sumf;
  4388. #elif defined(__POWER9_VECTOR__)
  4389. const vector signed char lowMask = vec_splats((signed char)0x3);
  4390. const vector signed char lowScaleMask = vec_splats((signed char)0xF);
  4391. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  4392. const vector unsigned char v6 = vec_splats((unsigned char)0x6);
  4393. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  4394. vector float vsumf0 = vec_splats(0.0f);
  4395. vector float vsumf1 = vec_splats(0.0f);
  4396. vector float vsumf2 = vec_splats(0.0f);
  4397. vector float vsumf3 = vec_splats(0.0f);
  4398. for (int i = 0; i < nb; ++i) {
  4399. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  4400. vector float vyd = vec_splats(y[i].d);
  4401. vector float vd = vec_mul(vxd, vyd);
  4402. vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
  4403. vector float vdmin = vec_mul(vxmin, vyd);
  4404. vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
  4405. vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
  4406. vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales);
  4407. vector signed char vscales = vec_and(q2xmins, lowScaleMask);
  4408. q2xmins = vec_sr(q2xmins, v4);
  4409. vector signed short q2xmins0 = vec_unpackh(q2xmins);
  4410. vector signed short q2xmins1 = vec_unpackl(q2xmins);
  4411. vector signed int prod0 = vec_mule(q2xmins0, q8ysums0);
  4412. vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0);
  4413. vector signed int prod2 = vec_mule(q2xmins1, q8ysums1);
  4414. vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1);
  4415. vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
  4416. vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
  4417. vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
  4418. vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
  4419. vector signed int vsumi0 = vec_splats((int32_t)0);
  4420. vector signed int vsumi1 = vec_splats((int32_t)0);
  4421. vector signed int vsumi2 = vec_splats((int32_t)0);
  4422. vector signed int vsumi3 = vec_splats((int32_t)0);
  4423. vector signed int vsumi4 = vec_splats((int32_t)0);
  4424. vector signed int vsumi5 = vec_splats((int32_t)0);
  4425. vector signed int vsumi6 = vec_splats((int32_t)0);
  4426. vector signed int vsumi7 = vec_splats((int32_t)0);
  4427. const uint8_t * restrict q2 = x[i].qs;
  4428. const int8_t * restrict q8 = y[i].qs;
  4429. for (int j = 0; j < QK_K/128; ++j) {
  4430. __builtin_prefetch(q2, 0, 1);
  4431. __builtin_prefetch(q8, 0, 1);
  4432. vector signed char qxs0 = (vector signed char)vec_xl( 0, q2);
  4433. vector signed char qxs1 = (vector signed char)vec_xl(16, q2);
  4434. q2 += 32;
  4435. vector signed char q2x00 = vec_and(qxs0, lowMask);
  4436. vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask);
  4437. vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask);
  4438. vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask);
  4439. vector signed char q2x10 = vec_and(qxs1, lowMask);
  4440. vector signed char q2x11 = vec_and(vec_sr(qxs1, v2), lowMask);
  4441. vector signed char q2x12 = vec_and(vec_sr(qxs1, v4), lowMask);
  4442. vector signed char q2x13 = vec_and(vec_sr(qxs1, v6), lowMask);
  4443. vector signed char q8y00 = vec_xl( 0, q8);
  4444. vector signed char q8y10 = vec_xl( 16, q8);
  4445. vector signed char q8y01 = vec_xl( 32, q8);
  4446. vector signed char q8y11 = vec_xl( 48, q8);
  4447. vector signed char q8y02 = vec_xl( 64, q8);
  4448. vector signed char q8y12 = vec_xl( 80, q8);
  4449. vector signed char q8y03 = vec_xl( 96, q8);
  4450. vector signed char q8y13 = vec_xl(112, q8);
  4451. q8 += 128;
  4452. vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00));
  4453. vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01));
  4454. vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02));
  4455. vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03));
  4456. vector signed short qv4 = vec_add(vec_mule(q2x10, q8y10), vec_mulo(q2x10, q8y10));
  4457. vector signed short qv5 = vec_add(vec_mule(q2x11, q8y11), vec_mulo(q2x11, q8y11));
  4458. vector signed short qv6 = vec_add(vec_mule(q2x12, q8y12), vec_mulo(q2x12, q8y12));
  4459. vector signed short qv7 = vec_add(vec_mule(q2x13, q8y13), vec_mulo(q2x13, q8y13));
  4460. vector signed short vscales_h = vec_unpackh(vscales);
  4461. vector signed short vs0 = vec_splat(vscales_h, 0);
  4462. vector signed short vs1 = vec_splat(vscales_h, 1);
  4463. vector signed short vs2 = vec_splat(vscales_h, 2);
  4464. vector signed short vs3 = vec_splat(vscales_h, 3);
  4465. vector signed short vs4 = vec_splat(vscales_h, 4);
  4466. vector signed short vs5 = vec_splat(vscales_h, 5);
  4467. vector signed short vs6 = vec_splat(vscales_h, 6);
  4468. vector signed short vs7 = vec_splat(vscales_h, 7);
  4469. vscales = vec_sld(vscales, vscales, 8);
  4470. qv0 = vec_mul(qv0, vs0);
  4471. qv1 = vec_mul(qv1, vs2);
  4472. qv2 = vec_mul(qv2, vs4);
  4473. qv3 = vec_mul(qv3, vs6);
  4474. qv0 = vec_madd(qv4, vs1, qv0);
  4475. qv1 = vec_madd(qv5, vs3, qv1);
  4476. qv2 = vec_madd(qv6, vs5, qv2);
  4477. qv3 = vec_madd(qv7, vs7, qv3);
  4478. vsumi0 = vec_add(vec_unpackh(qv0), vsumi0);
  4479. vsumi1 = vec_add(vec_unpackh(qv1), vsumi1);
  4480. vsumi2 = vec_add(vec_unpackh(qv2), vsumi2);
  4481. vsumi3 = vec_add(vec_unpackh(qv3), vsumi3);
  4482. vsumi4 = vec_add(vec_unpackl(qv0), vsumi4);
  4483. vsumi5 = vec_add(vec_unpackl(qv1), vsumi5);
  4484. vsumi6 = vec_add(vec_unpackl(qv2), vsumi6);
  4485. vsumi7 = vec_add(vec_unpackl(qv3), vsumi7);
  4486. }
  4487. vsumi0 = vec_add(vsumi0, vsumi4);
  4488. vsumi1 = vec_add(vsumi1, vsumi5);
  4489. vsumi2 = vec_add(vsumi2, vsumi6);
  4490. vsumi3 = vec_add(vsumi3, vsumi7);
  4491. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  4492. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  4493. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  4494. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  4495. }
  4496. vsumf0 = vec_add(vsumf0, vsumf2);
  4497. vsumf1 = vec_add(vsumf1, vsumf3);
  4498. vsumf0 = vec_add(vsumf0, vsumf1);
  4499. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  4500. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  4501. *s = vec_extract(vsumf0, 0);
  4502. #else
  4503. float sumf = 0;
  4504. for (int i = 0; i < nb; ++i) {
  4505. const uint8_t * q2 = x[i].qs;
  4506. const int8_t * q8 = y[i].qs;
  4507. const uint8_t * sc = x[i].scales;
  4508. int summs = 0;
  4509. for (int j = 0; j < 16; ++j) {
  4510. summs += y[i].bsums[j] * (sc[j] >> 4);
  4511. }
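// dall scales the quantized dot product, dmin scales the correction that removes the
// per-sub-block minimum; each k iteration walks four 2-bit planes of a 32-byte q2 chunk.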
  4512. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4513. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4514. int isum = 0;
  4515. int is = 0;
  4516. int d;
  4517. for (int k = 0; k < QK_K/128; ++k) {
  4518. int shift = 0;
  4519. for (int j = 0; j < 4; ++j) {
  4520. d = sc[is++] & 0xF;
  4521. int isuml = 0;
  4522. for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
  4523. isum += d * isuml;
  4524. d = sc[is++] & 0xF;
  4525. isuml = 0;
  4526. for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
  4527. isum += d * isuml;
  4528. shift += 2;
  4529. q8 += 32;
  4530. }
  4531. q2 += 32;
  4532. }
  4533. sumf += dall * isum - dmin * summs;
  4534. }
  4535. *s = sumf;
  4536. #endif
  4537. }
  4538. #else
  4539. void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4540. assert(nrc == 1);
  4541. UNUSED(nrc);
  4542. UNUSED(bx);
  4543. UNUSED(by);
  4544. UNUSED(bs);
  4545. const block_q2_K * restrict x = vx;
  4546. const block_q8_K * restrict y = vy;
  4547. const int nb = n / QK_K;
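// QK_K == 64 variant: a single group of 64 quants per block, with only four scale/min
// pairs packed into x[i].scales.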
  4548. #ifdef __ARM_NEON
  4549. const uint8x16_t m3 = vdupq_n_u8(0x3);
  4550. const int32x4_t vzero = vdupq_n_s32(0);
  4551. ggml_int8x16x4_t q2bytes;
  4552. uint32_t aux32[2];
  4553. const uint8_t * scales = (const uint8_t *)aux32;
  4554. float sum = 0;
  4555. for (int i = 0; i < nb; ++i) {
  4556. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4557. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4558. const uint8_t * restrict q2 = x[i].qs;
  4559. const int8_t * restrict q8 = y[i].qs;
  4560. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4561. aux32[0] = sc[0] & 0x0f0f0f0f;
  4562. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
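// aux32[0] now holds the four 4-bit scales and aux32[1] the four 4-bit mins, so they can
// be read as plain bytes through scales[0..3] and scales[4..7] below.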
  4563. sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  4564. int isum1 = 0, isum2 = 0;
  4565. const uint8x16_t q2bits = vld1q_u8(q2);
  4566. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  4567. q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
  4568. q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
  4569. q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
  4570. q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
  4571. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
  4572. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
  4573. isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
  4574. isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
  4575. sum += d * (isum1 + isum2);
  4576. }
  4577. *s = sum;
  4578. #elif defined __AVX2__
  4579. const __m256i m3 = _mm256_set1_epi8(3);
  4580. __m256 acc = _mm256_setzero_ps();
  4581. uint32_t ud, um;
  4582. const uint8_t * restrict db = (const uint8_t *)&ud;
  4583. const uint8_t * restrict mb = (const uint8_t *)&um;
  4584. float summs = 0;
  4585. // TODO: optimize this
  4586. for (int i = 0; i < nb; ++i) {
  4587. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4588. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4589. const uint8_t * restrict q2 = x[i].qs;
  4590. const int8_t * restrict q8 = y[i].qs;
  4591. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4592. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  4593. um = (sc[0] >> 4) & 0x0f0f0f0f;
  4594. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  4595. summs += dmin * smin;
  4596. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  4597. const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
  4598. const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
  4599. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4600. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  4601. const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
  4602. const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
  4603. const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
  4604. const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
  4605. const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
  4606. const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
  4607. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
  4608. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
  4609. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
  4610. acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
  4611. }
  4612. *s = hsum_float_8(acc) + summs;
  4613. #elif defined __AVX__
  4614. const __m128i m3 = _mm_set1_epi8(3);
  4615. __m256 acc = _mm256_setzero_ps();
  4616. uint32_t ud, um;
  4617. const uint8_t * restrict db = (const uint8_t *)&ud;
  4618. const uint8_t * restrict mb = (const uint8_t *)&um;
  4619. float summs = 0;
  4620. // TODO: optimize this
  4621. for (int i = 0; i < nb; ++i) {
  4622. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4623. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4624. const uint8_t * restrict q2 = x[i].qs;
  4625. const int8_t * restrict q8 = y[i].qs;
  4626. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4627. ud = (sc[0] >> 0) & 0x0f0f0f0f;
  4628. um = (sc[0] >> 4) & 0x0f0f0f0f;
  4629. int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
  4630. summs += dmin * smin;
  4631. const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
  4632. const __m128i q2_0 = _mm_and_si128(q2bits, m3);
  4633. const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
  4634. const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
  4635. const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
  4636. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  4637. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  4638. const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
  4639. const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
  4640. const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
  4641. const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
  4642. const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
  4643. const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
  4644. const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
  4645. const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
  4646. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
  4647. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
  4648. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
  4649. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
  4650. }
  4651. *s = hsum_float_8(acc) + summs;
  4652. #elif defined __riscv_v_intrinsic
  4653. uint32_t aux32[2];
  4654. const uint8_t * scales = (const uint8_t *)aux32;
  4655. float sumf = 0;
  4656. for (int i = 0; i < nb; ++i) {
  4657. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4658. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4659. const uint8_t * restrict q2 = x[i].qs;
  4660. const int8_t * restrict q8 = y[i].qs;
  4661. const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
  4662. aux32[0] = sc[0] & 0x0f0f0f0f;
  4663. aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
  4664. sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
  4665. int isum1 = 0;
  4666. int isum2 = 0;
  4667. size_t vl = 16;
  4668. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  4669. // load Q2
  4670. vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
  4671. vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
  4672. vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
  4673. vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
  4674. vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
  4675. // load Q8, and take product with Q2
  4676. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  4677. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  4678. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  4679. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  4680. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
  4681. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
  4682. vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
  4683. vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
  4684. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
  4685. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
  4686. isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
  4687. isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
  4688. sumf += d * (isum1 + isum2);
  4689. }
  4690. *s = sumf;
  4691. #elif defined(__POWER9_VECTOR__)
  4692. const vector signed char lowMask = vec_splats((signed char)0x3);
  4693. const vector signed char lowScaleMask = vec_splats((signed char)0xF);
  4694. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  4695. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  4696. const vector unsigned char v6 = vec_splats((unsigned char)0x6);
  4697. vector float vsumf0 = vec_splats(0.0f);
  4698. vector float vsumf1 = vec_splats(0.0f);
  4699. vector float vsumf2 = vec_splats(0.0f);
  4700. vector float vsumf3 = vec_splats(0.0f);
  4701. #pragma GCC unroll 2
  4702. for (int i = 0; i < nb; ++i) {
  4703. __builtin_prefetch(x[i].qs, 0, 1);
  4704. __builtin_prefetch(y[i].qs, 0, 1);
  4705. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  4706. vector float vyd = vec_splats(y[i].d);
  4707. vector float vd = vec_mul(vxd, vyd);
  4708. vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
  4709. vector float vdmin = vec_mul(vxmin, vyd);
  4710. vector signed short q8ysums0 = vec_xl_len(y[i].bsums, 8);
  4711. vector signed char q2xmins = (vector signed char)vec_xl_len(x[i].scales, 4);
  4712. vector signed char vscales = vec_and(q2xmins, lowScaleMask);
  4713. q2xmins = vec_sr(q2xmins, v4);
  4714. vector signed short q2xmins0 = vec_unpackh((vector signed char)q2xmins);
  4715. vector signed int prod0 = vec_mule(q2xmins0, q8ysums0);
  4716. vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0);
  4717. vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
  4718. vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
  4719. vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs);
  4720. vector signed char q2x00 = vec_and(qxs0, lowMask);
  4721. vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask);
  4722. vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask);
  4723. vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask);
  4724. vector signed char q8y00 = vec_xl( 0, y[i].qs);
  4725. vector signed char q8y01 = vec_xl( 16, y[i].qs);
  4726. vector signed char q8y02 = vec_xl( 32, y[i].qs);
  4727. vector signed char q8y03 = vec_xl( 48, y[i].qs);
  4728. vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00));
  4729. vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01));
  4730. vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02));
  4731. vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03));
  4732. vector signed short vscales_h = vec_unpackh(vscales);
  4733. vector signed short vs0 = vec_splat(vscales_h, 0);
  4734. vector signed short vs1 = vec_splat(vscales_h, 1);
  4735. vector signed short vs2 = vec_splat(vscales_h, 2);
  4736. vector signed short vs3 = vec_splat(vscales_h, 3);
  4737. vector signed int vsumi0 = vec_add(vec_mule(qv0, vs0), vec_mulo(qv0, vs0));
  4738. vector signed int vsumi1 = vec_add(vec_mule(qv1, vs1), vec_mulo(qv1, vs1));
  4739. vector signed int vsumi2 = vec_add(vec_mule(qv2, vs2), vec_mulo(qv2, vs2));
  4740. vector signed int vsumi3 = vec_add(vec_mule(qv3, vs3), vec_mulo(qv3, vs3));
  4741. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  4742. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  4743. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  4744. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  4745. }
  4746. vsumf0 = vec_add(vsumf0, vsumf2);
  4747. vsumf1 = vec_add(vsumf1, vsumf3);
  4748. vsumf0 = vec_add(vsumf0, vsumf1);
  4749. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  4750. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  4751. *s = vec_extract(vsumf0, 0);
  4752. #else
  4753. float sumf = 0;
  4754. int isum[QK_K/16];
  4755. for (int i = 0; i < nb; ++i) {
  4756. const uint8_t * q2 = x[i].qs;
  4757. const int8_t * q8 = y[i].qs;
  4758. const uint8_t * sc = x[i].scales;
  4759. int summs = 0;
  4760. for (int j = 0; j < QK_K/16; ++j) {
  4761. summs += y[i].bsums[j] * (sc[j] >> 4);
  4762. }
  4763. const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4764. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  4765. memset(isum, 0, (QK_K/16)*sizeof(int));
  4766. for (int l = 0; l < 16; ++l) {
  4767. isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
  4768. isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
  4769. isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
  4770. isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
  4771. }
  4772. for (int l = 0; l < QK_K/16; ++l) {
  4773. isum[l] *= (sc[l] & 0xF);
  4774. }
  4775. sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
  4776. }
  4777. *s = sumf;
  4778. #endif
  4779. }
  4780. #endif
  4781. #if QK_K == 256
  4782. void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  4783. assert(n % QK_K == 0);
  4784. assert(nrc == 1);
  4785. UNUSED(nrc);
  4786. UNUSED(bx);
  4787. UNUSED(by);
  4788. UNUSED(bs);
  4789. const uint32_t kmask1 = 0x03030303;
  4790. const uint32_t kmask2 = 0x0f0f0f0f;
  4791. const block_q3_K * restrict x = vx;
  4792. const block_q8_K * restrict y = vy;
  4793. const int nb = n / QK_K;
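// q3_K: 2 low bits per quant in qs[] plus 1 high bit per quant in hmask[]; the 16 6-bit
// sub-block scales are packed into 12 bytes and stored with a bias of 32.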
  4794. #ifdef __ARM_NEON
  4795. uint32_t aux[3];
  4796. uint32_t utmp[4];
  4797. const uint8x16_t m3b = vdupq_n_u8(0x3);
  4798. const int32x4_t vzero = vdupq_n_s32(0);
  4799. const uint8x16_t m0 = vdupq_n_u8(1);
  4800. const uint8x16_t m1 = vshlq_n_u8(m0, 1);
  4801. const uint8x16_t m2 = vshlq_n_u8(m0, 2);
  4802. const uint8x16_t m3 = vshlq_n_u8(m0, 3);
  4803. const int8_t m32 = 32;
  4804. ggml_int8x16x4_t q3bytes;
  4805. float sum = 0;
  4806. for (int i = 0; i < nb; ++i) {
  4807. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4808. const uint8_t * restrict q3 = x[i].qs;
  4809. const uint8_t * restrict qh = x[i].hmask;
  4810. const int8_t * restrict q8 = y[i].qs;
  4811. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  4812. ggml_uint8x16x4_t q3h;
  4813. int32_t isum = 0;
  4814. // Set up scales
  4815. memcpy(aux, x[i].scales, 12);
  4816. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  4817. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  4818. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  4819. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  4820. int8_t * scale = (int8_t *)utmp;
  4821. for (int j = 0; j < 16; ++j) scale[j] -= m32;
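// utmp[] now holds the 16 sub-block scales as signed bytes in [-32, 31].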
  4822. for (int j = 0; j < QK_K/128; ++j) {
  4823. const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
  4824. const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
  4825. const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
  4826. q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
  4827. q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
  4828. q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
  4829. q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
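// each q3h plane is 4 exactly where the corresponding high bit is NOT set; subtracting it
// from the 2-bit value implements ((low bits | high_bit << 2) - 4), i.e. quants in [-4, 3].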
  4830. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  4831. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  4832. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  4833. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  4834. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
  4835. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
  4836. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
  4837. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
  4838. scale += 4;
  4839. q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
  4840. q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
  4841. q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
  4842. q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
  4843. q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
  4844. q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
  4845. q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
  4846. q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
  4847. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
  4848. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
  4849. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
  4850. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
  4851. scale += 4;
  4852. if (j == 0) {
  4853. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
  4854. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
  4855. }
  4856. }
  4857. sum += d * isum;
  4858. }
  4859. *s = sum;
  4860. #elif defined __AVX2__
  4861. const __m256i m3 = _mm256_set1_epi8(3);
  4862. const __m256i mone = _mm256_set1_epi8(1);
  4863. const __m128i m32 = _mm_set1_epi8(32);
  4864. __m256 acc = _mm256_setzero_ps();
  4865. uint32_t aux[3];
  4866. for (int i = 0; i < nb; ++i) {
  4867. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4868. const uint8_t * restrict q3 = x[i].qs;
  4869. const int8_t * restrict q8 = y[i].qs;
  4870. // Set up scales
  4871. memcpy(aux, x[i].scales, 12);
  4872. __m128i scales128 = _mm_set_epi32(
  4873. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  4874. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  4875. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  4876. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  4877. scales128 = _mm_sub_epi8(scales128, m32);
  4878. const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
  4879. const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
  4880. const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
  4881. const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
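// Note: Q3_K packs 16 6-bit sub-block scales, biased by +32, into the 12 scale bytes (the low 4 bits in the
// nibbles of scales[0..7], the top 2 bits four per byte in scales[8..11]). The kmask shuffling above
// reassembles them, _mm_sub_epi8 removes the bias, and the result is widened to 16-bit for _mm256_madd_epi16.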
  4882. // high bit
  4883. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
  4884. // integer accumulator
  4885. __m256i sumi = _mm256_setzero_si256();
  4886. int bit = 0;
  4887. int is = 0;
  4888. for (int j = 0; j < QK_K/128; ++j) {
  4889. // load low 2 bits
  4890. const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
  4891. // prepare low and high bits
  4892. const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
  4893. const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4894. ++bit;
  4895. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
  4896. const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4897. ++bit;
  4898. const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
  4899. const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4900. ++bit;
  4901. const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
  4902. const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
  4903. ++bit;
  4904. // load Q8 quants
  4905. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4906. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4907. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  4908. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
// Dot product: the 2 low bits and the high-bit correction are multiplied with q8 separately, so that
// _mm256_maddubs_epi16 (which needs an unsigned first operand) can be used, and the two products are then
// subtracted. The high-bit part already carries the -4 offset of Q3_K: it is 4 if the high bit was not set
// and 0 if it was set, so p16 - q8s equals q8 * ((q3 & 3) - (high bit ? 0 : 4)).
  4912. __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  4913. __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  4914. __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
  4915. __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
  4916. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  4917. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  4918. __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
  4919. __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
  4920. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  4921. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  4922. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  4923. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
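// Worked example (illustrative): for a quant with low bits 0b10 and a cleared hmask bit, q3l = 2 and
// q3h = 4, so the lane contributes q8*2 - q8*4 = q8*(2 - 4), matching the scalar decode (q3 & 3) - 4;
// if the hmask bit is set, q3h = 0 and the contribution is simply q8*2.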
  4924. // multiply with scales
  4925. p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
  4926. p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
  4927. p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
  4928. p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
  4929. // accumulate
  4930. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  4931. p16_2 = _mm256_add_epi32(p16_2, p16_3);
  4932. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
  4933. }
  4934. // multiply with block scale and accumulate
  4935. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  4936. }
  4937. *s = hsum_float_8(acc);
  4938. #elif defined __AVX__
  4939. const __m128i m3 = _mm_set1_epi8(3);
  4940. const __m128i mone = _mm_set1_epi8(1);
  4941. const __m128i m32 = _mm_set1_epi8(32);
  4942. const __m128i m2 = _mm_set1_epi8(2);
  4943. __m256 acc = _mm256_setzero_ps();
  4944. const uint32_t *aux;
  4945. for (int i = 0; i < nb; ++i) {
  4946. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  4947. const uint8_t * restrict q3 = x[i].qs;
  4948. const int8_t * restrict q8 = y[i].qs;
  4949. // Set up scales
  4950. aux = (const uint32_t *)x[i].scales;
  4951. __m128i scales128 = _mm_set_epi32(
  4952. ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
  4953. ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
  4954. (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
  4955. (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
  4956. scales128 = _mm_sub_epi8(scales128, m32);
  4957. const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
  4958. const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
  4959. const __m128i scales[2] = { scales_0, scales_1 };
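// Same 6-bit scale unpacking as the AVX2 path, but kept in 128-bit registers: scales_0 holds the 16-bit
// scales for sub-blocks 0-7 and scales_1 those for sub-blocks 8-15, selected per 128-quant half via scales[j].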
// high bits: two 128-bit loads from block_q3_K.hmask[QK_K/8]
  4961. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
  4962. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
  4963. // integer accumulator
  4964. __m128i sumi_0 = _mm_setzero_si128();
  4965. __m128i sumi_1 = _mm_setzero_si128();
  4966. for (int j = 0; j < QK_K/128; ++j) {
// load low 2 bits: two 128-bit loads (64 2-bit quants each) from block_q3_K.qs[QK_K/4]
  4968. const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  4969. const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
  4970. // prepare low and high bits
  4971. const int bit = j << 2;
  4972. const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
  4973. const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
  4974. const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
  4975. const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
  4976. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
  4977. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
  4978. const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  4979. const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
  4980. const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
  4981. const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
  4982. const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  4983. const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
  4984. const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
  4985. const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
  4986. const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
  4987. const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
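// For this 128-quant half, bit = 4*j selects the hmask bits of the four 32-quant groups:
// _mm_slli_epi16(mone, bit+k) puts a 1 at bit position (4*j + k) of every byte, andnot keeps it where the
// stored high bit is 0, and the shift-right/shift-left pair turns that into the value 4 subtracted below.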
  4988. // load Q8 quants from block_q8_K.qs[QK_K]
  4989. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4990. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4991. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4992. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4993. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4994. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4995. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  4996. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
// Dot product: the 2 low bits and the high-bit correction are multiplied with q8 separately, so that
// _mm_maddubs_epi16 (which needs an unsigned first operand) can be used, and the two products are then
// subtracted. The high-bit part already carries the -4 offset: it is 4 if the high bit was not set and 0 if it was set.
  5000. __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
  5001. __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
  5002. __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
  5003. __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
  5004. __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
  5005. __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
  5006. __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
  5007. __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
  5008. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
  5009. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
  5010. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
  5011. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
  5012. __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
  5013. __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
  5014. __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
  5015. __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
  5016. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  5017. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  5018. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  5019. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  5020. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  5021. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  5022. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  5023. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  5024. // multiply with scales
  5025. __m128i shuffle = _mm_set1_epi16(0x0100);
  5026. p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
  5027. shuffle = _mm_add_epi16(shuffle, m2);
  5028. p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
  5029. shuffle = _mm_add_epi16(shuffle, m2);
  5030. p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
  5031. shuffle = _mm_add_epi16(shuffle, m2);
  5032. p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
  5033. shuffle = _mm_add_epi16(shuffle, m2);
  5034. p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
  5035. shuffle = _mm_add_epi16(shuffle, m2);
  5036. p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
  5037. shuffle = _mm_add_epi16(shuffle, m2);
  5038. p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
  5039. shuffle = _mm_add_epi16(shuffle, m2);
  5040. p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
  5041. // accumulate
  5042. p16_0 = _mm_add_epi32(p16_0, p16_1);
  5043. p16_2 = _mm_add_epi32(p16_2, p16_3);
  5044. p16_4 = _mm_add_epi32(p16_4, p16_5);
  5045. p16_6 = _mm_add_epi32(p16_6, p16_7);
  5046. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  5047. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
  5048. }
  5049. // multiply with block scale and accumulate
  5050. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  5051. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  5052. }
  5053. *s = hsum_float_8(acc);
  5054. #elif defined __riscv_v_intrinsic
  5055. uint32_t aux[3];
  5056. uint32_t utmp[4];
  5057. float sumf = 0;
  5058. for (int i = 0; i < nb; ++i) {
  5059. const uint8_t * restrict q3 = x[i].qs;
  5060. const uint8_t * restrict qh = x[i].hmask;
  5061. const int8_t * restrict q8 = y[i].qs;
  5062. memcpy(aux, x[i].scales, 12);
  5063. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  5064. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  5065. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  5066. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  5067. int8_t * scale = (int8_t *)utmp;
  5068. for (int j = 0; j < 16; ++j) scale[j] -= 32;
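// As in the x86 paths, the 16 sub-block scales are reassembled from the 12 packed scale bytes into utmp
// and the +32 bias is removed, leaving signed 6-bit scales in scale[0..15].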
  5069. size_t vl = 32;
  5070. uint8_t m = 1;
  5071. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5072. vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
  5073. int sum_t = 0;
  5074. for (int j = 0; j < QK_K; j += 128) {
  5075. vl = 32;
  5076. // load Q3
  5077. vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
  5078. vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
  5079. vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
  5080. vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
  5081. vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
  5082. // compute mask for subtraction
  5083. vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5084. vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
  5085. vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
  5086. m <<= 1;
  5087. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5088. vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
  5089. vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
  5090. m <<= 1;
  5091. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5092. vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
  5093. vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
  5094. m <<= 1;
  5095. vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
  5096. vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
  5097. vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
  5098. m <<= 1;
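// The vmseq masks above are true in lanes whose hmask bit is 0, so the masked vsub applies the -4 offset
// of Q3_K only to those lanes; m walks through the four hmask bits used by this 128-quant chunk.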
  5099. // load Q8 and take product with Q3
  5100. vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
  5101. vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  5102. vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  5103. vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  5104. vl = 16;
// widen each half of the 16-bit product registers and multiply by the corresponding sub-block scale
  5106. vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
  5107. vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
  5108. vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
  5109. vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
  5110. vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
  5111. vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
  5112. vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
  5113. vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
  5114. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
  5115. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
  5116. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
  5117. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
  5118. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
  5119. q3 += 32; q8 += 128; scale += 8;
  5120. }
  5121. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5122. sumf += d*sum_t;
  5123. }
  5124. *s = sumf;
  5125. #elif defined(__POWER9_VECTOR__)
  5126. const vector signed char lowMask = vec_splats((signed char)0x3);
  5127. const vector signed char v1 = vec_splats((signed char)0x1);
  5128. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  5129. const vector unsigned char v3 = vec_splats((unsigned char)0x3);
  5130. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  5131. const vector unsigned char v6 = vec_splats((unsigned char)0x6);
  5132. const vector signed char off = vec_splats((signed char)0x20);
  5133. vector float vsumf0 = vec_splats(0.0f);
  5134. vector float vsumf1 = vec_splats(0.0f);
  5135. vector float vsumf2 = vec_splats(0.0f);
  5136. vector float vsumf3 = vec_splats(0.0f);
  5137. for (int i = 0; i < nb; ++i) {
  5138. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  5139. vector float vyd = vec_splats(y[i].d);
  5140. vector float vd = vec_mul(vxd, vyd);
  5141. uint32_t aux[3];
  5142. uint32_t utmp[4];
  5143. memcpy(aux, x[i].scales, 12);
  5144. utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
  5145. utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
  5146. utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
  5147. utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
  5148. vector signed char vscales = (vector signed char)vec_xl( 0, utmp);
  5149. vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask);
  5150. vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask);
  5151. vscales = vec_sub(vscales, off);
  5152. vector signed int vsumi0 = vec_splats((int32_t)0);
  5153. vector signed int vsumi1 = vec_splats((int32_t)0);
  5154. vector signed int vsumi2 = vec_splats((int32_t)0);
  5155. vector signed int vsumi3 = vec_splats((int32_t)0);
  5156. vector signed int vsumi4 = vec_splats((int32_t)0);
  5157. vector signed int vsumi5 = vec_splats((int32_t)0);
  5158. vector signed int vsumi6 = vec_splats((int32_t)0);
  5159. vector signed int vsumi7 = vec_splats((int32_t)0);
  5160. const uint8_t * restrict q3 = x[i].qs;
  5161. const int8_t * restrict q8 = y[i].qs;
  5162. for (int j = 0; j < QK_K/128; ++j) {
  5163. __builtin_prefetch(q3, 0, 1);
  5164. __builtin_prefetch(q8, 0, 1);
  5165. vector signed char qxs0 = (vector signed char)vec_xl( 0, q3);
  5166. vector signed char qxs1 = (vector signed char)vec_xl(16, q3);
  5167. q3 += 32;
  5168. //the low 2 bits
  5169. vector signed char qxs00 = vec_and(qxs0, lowMask);
  5170. vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask);
  5171. vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask);
  5172. vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask);
  5173. vector signed char qxs10 = vec_and(qxs1, lowMask);
  5174. vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask);
  5175. vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask);
  5176. vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask);
  5177. //the 3rd bit
  5178. vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2);
  5179. vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2);
  5180. vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2);
  5181. vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2);
  5182. vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2);
  5183. vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2);
  5184. vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2);
  5185. vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2);
  5186. qxhs0 = vec_sr(qxhs0, v4);
  5187. qxhs1 = vec_sr(qxhs1, v4);
  5188. vector signed char q3x00 = vec_sub(qxs00, qxh00);
  5189. vector signed char q3x01 = vec_sub(qxs01, qxh01);
  5190. vector signed char q3x02 = vec_sub(qxs02, qxh02);
  5191. vector signed char q3x03 = vec_sub(qxs03, qxh03);
  5192. vector signed char q3x10 = vec_sub(qxs10, qxh10);
  5193. vector signed char q3x11 = vec_sub(qxs11, qxh11);
  5194. vector signed char q3x12 = vec_sub(qxs12, qxh12);
  5195. vector signed char q3x13 = vec_sub(qxs13, qxh13);
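// Same decode as the other paths: vec_andc(v1, qxhs*) is 1 where the stored high bit is 0, vec_sl(..., v2)
// turns that into 4, and the vec_sub above applies the -4 offset; qxhs0/qxhs1 were shifted right by 4 so
// the next loop iteration consumes hmask bits 4-7.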
  5196. vector signed char q8y00 = vec_xl( 0, q8);
  5197. vector signed char q8y10 = vec_xl( 16, q8);
  5198. vector signed char q8y01 = vec_xl( 32, q8);
  5199. vector signed char q8y11 = vec_xl( 48, q8);
  5200. vector signed char q8y02 = vec_xl( 64, q8);
  5201. vector signed char q8y12 = vec_xl( 80, q8);
  5202. vector signed char q8y03 = vec_xl( 96, q8);
  5203. vector signed char q8y13 = vec_xl(112, q8);
  5204. q8 += 128;
  5205. vector signed short vscales_h = vec_unpackh(vscales);
  5206. vector signed short vs0 = vec_splat(vscales_h, 0);
  5207. vector signed short vs1 = vec_splat(vscales_h, 1);
  5208. vector signed short vs2 = vec_splat(vscales_h, 2);
  5209. vector signed short vs3 = vec_splat(vscales_h, 3);
  5210. vector signed short vs4 = vec_splat(vscales_h, 4);
  5211. vector signed short vs5 = vec_splat(vscales_h, 5);
  5212. vector signed short vs6 = vec_splat(vscales_h, 6);
  5213. vector signed short vs7 = vec_splat(vscales_h, 7);
  5214. vscales = vec_sld(vscales, vscales, 8);
  5215. vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00));
  5216. vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01));
  5217. vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02));
  5218. vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03));
  5219. vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10));
  5220. vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11));
  5221. vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12));
  5222. vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13));
  5223. vector signed int vsum0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0));
  5224. vector signed int vsum1 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2));
  5225. vector signed int vsum2 = vec_add(vec_mule(qv02, vs4), vec_mulo(qv02, vs4));
  5226. vector signed int vsum3 = vec_add(vec_mule(qv03, vs6), vec_mulo(qv03, vs6));
  5227. vector signed int vsum4 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1));
  5228. vector signed int vsum5 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3));
  5229. vector signed int vsum6 = vec_add(vec_mule(qv12, vs5), vec_mulo(qv12, vs5));
  5230. vector signed int vsum7 = vec_add(vec_mule(qv13, vs7), vec_mulo(qv13, vs7));
  5231. vsumi0 = vec_add(vsum0, vsumi0);
  5232. vsumi1 = vec_add(vsum1, vsumi1);
  5233. vsumi2 = vec_add(vsum2, vsumi2);
  5234. vsumi3 = vec_add(vsum3, vsumi3);
  5235. vsumi4 = vec_add(vsum4, vsumi4);
  5236. vsumi5 = vec_add(vsum5, vsumi5);
  5237. vsumi6 = vec_add(vsum6, vsumi6);
  5238. vsumi7 = vec_add(vsum7, vsumi7);
  5239. }
  5240. vsumi0 = vec_add(vsumi0, vsumi4);
  5241. vsumi1 = vec_add(vsumi1, vsumi5);
  5242. vsumi2 = vec_add(vsumi2, vsumi6);
  5243. vsumi3 = vec_add(vsumi3, vsumi7);
  5244. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  5245. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  5246. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  5247. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  5248. }
  5249. vsumf0 = vec_add(vsumf0, vsumf2);
  5250. vsumf1 = vec_add(vsumf1, vsumf3);
  5251. vsumf0 = vec_add(vsumf0, vsumf1);
  5252. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  5253. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  5254. *s = vec_extract(vsumf0, 0);
  5255. #else
  5256. // scalar version
  5257. // This function is written like this so the compiler can manage to vectorize most of it
  5258. // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
  5259. // manually vectorized version above. Every other version I tried would run at least 4 times slower.
  5260. // The ideal situation would be if we could just write the code once, and the compiler would
  5261. // automatically produce the best possible set of machine instructions, instead of us having to manually
  5262. // write vectorized versions for AVX, ARM_NEON, etc.
  5263. int8_t aux8[QK_K];
  5264. int16_t aux16[8];
  5265. float sums [8];
  5266. int32_t aux32[8];
  5267. memset(sums, 0, 8*sizeof(float));
  5268. uint32_t auxs[4];
  5269. const int8_t * scales = (const int8_t*)auxs;
  5270. float sumf = 0;
  5271. for (int i = 0; i < nb; ++i) {
  5272. const uint8_t * restrict q3 = x[i].qs;
  5273. const uint8_t * restrict hm = x[i].hmask;
  5274. const int8_t * restrict q8 = y[i].qs;
  5275. memset(aux32, 0, 8*sizeof(int32_t));
  5276. int8_t * restrict a = aux8;
  5277. uint8_t m = 1;
  5278. for (int j = 0; j < QK_K; j += 128) {
  5279. for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
  5280. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5281. a += 32; m <<= 1;
  5282. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
  5283. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5284. a += 32; m <<= 1;
  5285. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
  5286. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5287. a += 32; m <<= 1;
  5288. for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
  5289. for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
  5290. a += 32; m <<= 1;
  5291. q3 += 32;
  5292. }
  5293. a = aux8;
  5294. memcpy(auxs, x[i].scales, 12);
  5295. uint32_t tmp = auxs[2];
  5296. auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
  5297. auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
  5298. auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
  5299. auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
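// At this point aux8 holds all QK_K decoded values in [-4, 3] and auxs holds the 16 packed 6-bit scales;
// the loop below multiplies each group of 16 quants by (scales[j] - 32), removing the bias on the fly.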
  5300. for (int j = 0; j < QK_K/16; ++j) {
  5301. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5302. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  5303. q8 += 8; a += 8;
  5304. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5305. for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
  5306. q8 += 8; a += 8;
  5307. }
  5308. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5309. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5310. }
  5311. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5312. *s = sumf;
  5313. #endif
  5314. }
  5315. #else
  5316. void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  5317. assert(n % QK_K == 0);
  5318. assert(nrc == 1);
  5319. UNUSED(nrc);
  5320. UNUSED(bx);
  5321. UNUSED(by);
  5322. UNUSED(bs);
  5323. const block_q3_K * restrict x = vx;
  5324. const block_q8_K * restrict y = vy;
  5325. const int nb = n / QK_K;
  5326. #ifdef __ARM_NEON
  5327. const int32x4_t vzero = vdupq_n_s32(0);
  5328. const uint8x16_t m3b = vdupq_n_u8(0x3);
  5329. const uint8x16_t mh = vdupq_n_u8(4);
  5330. ggml_int8x16x4_t q3bytes;
  5331. uint16_t aux16[2];
  5332. int8_t * scales = (int8_t *)aux16;
  5333. float sum = 0;
  5334. for (int i = 0; i < nb; ++i) {
  5335. ggml_uint8x16x4_t q3h;
  5336. const uint8x8_t hbits = vld1_u8(x[i].hmask);
  5337. const uint8x16_t q3bits = vld1q_u8(x[i].qs);
  5338. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);
  5339. const uint16_t a = *(const uint16_t *)x[i].scales;
  5340. aux16[0] = a & 0x0f0f;
  5341. aux16[1] = (a >> 4) & 0x0f0f;
  5342. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  5343. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
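// In this QK_K == 64 variant the high bit is OR-ed in below (values end up in 0..7), so the constant -4
// offset is hoisted out here: bsums holds the per-16 sums of q8, and -4 * scale * bsum accounts for the
// offset of every quant in that sub-block. The scale indices are permuted (0, 2, 1, 3) because of how the
// two nibble extractions above interleave the four 4-bit scales.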
  5344. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5345. const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
  5346. q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
  5347. q3h.val[1] = vandq_u8(mh, htmp);
  5348. q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
  5349. q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
  5350. q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
  5351. q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
  5352. q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
  5353. q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
  5354. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
  5355. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
  5356. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
  5357. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
  5358. sum += d * isum;
  5359. }
  5360. *s = sum;
  5361. #elif defined __AVX2__
  5362. const __m256i m3 = _mm256_set1_epi8(3);
  5363. const __m256i m1 = _mm256_set1_epi8(1);
  5364. __m256 acc = _mm256_setzero_ps();
  5365. uint64_t aux64;
  5366. uint16_t aux16[2];
  5367. const int8_t * aux8 = (const int8_t *)aux16;
  5368. for (int i = 0; i < nb; ++i) {
  5369. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5370. const uint8_t * restrict q3 = x[i].qs;
  5371. const int8_t * restrict q8 = y[i].qs;
  5372. const uint16_t a = *(const uint16_t *)x[i].scales;
  5373. aux16[0] = a & 0x0f0f;
  5374. aux16[1] = (a >> 4) & 0x0f0f;
  5375. const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
  5376. const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
  5377. memcpy(&aux64, x[i].hmask, 8);
  5378. const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  5379. __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
  5380. __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
  5381. q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
  5382. q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
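// The 8 hmask bytes are expanded so every byte lane ends up holding the high bit of its quant in bit 0;
// the andnot with m1 then leaves a 1 only where that bit is 0, and the shift by 2 turns it into the value 4
// subtracted from the low-bit product below - the same decode trick as in the QK_K == 256 kernels.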
  5383. // load low 2 bits
  5384. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  5385. // prepare low and high bits
  5386. const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
  5387. const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
  5388. const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
  5389. // load Q8 quants
  5390. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5391. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
// Dot product: the 2 low bits and the high-bit correction are multiplied with q8 separately, so that
// _mm256_maddubs_epi16 (which needs an unsigned first operand) can be used, and the two products are then
// subtracted. The high-bit part already carries the -4 offset: it is 4 if the high bit was not set and 0 if it was set.
  5395. const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
  5396. const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
  5397. __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
  5398. __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
  5399. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  5400. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  5401. // multiply with scales
  5402. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  5403. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  5404. p16_0 = _mm256_add_epi32(p16_0, p16_1);
  5405. // multiply with block scale and accumulate
  5406. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
  5407. }
  5408. *s = hsum_float_8(acc);
  5409. #elif defined __AVX__
  5410. const __m128i m3 = _mm_set1_epi8(3);
  5411. const __m128i m1 = _mm_set1_epi8(1);
  5412. __m256 acc = _mm256_setzero_ps();
  5413. uint64_t aux64;
  5414. uint16_t aux16[2];
  5415. const int8_t * aux8 = (const int8_t *)aux16;
  5416. for (int i = 0; i < nb; ++i) {
  5417. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5418. const uint8_t * restrict q3 = x[i].qs;
  5419. const int8_t * restrict q8 = y[i].qs;
  5420. const uint16_t a = *(const uint16_t *)x[i].scales;
  5421. aux16[0] = a & 0x0f0f;
  5422. aux16[1] = (a >> 4) & 0x0f0f;
  5423. const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
  5424. const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
  5425. const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
  5426. const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
  5427. memcpy(&aux64, x[i].hmask, 8);
  5428. __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
  5429. __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
  5430. __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
  5431. __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
  5432. q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
  5433. q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
  5434. q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
  5435. q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
  5436. // load low 2 bits
  5437. const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
  5438. // prepare low and high bits
  5439. const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
  5440. const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
  5441. const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
  5442. const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
  5443. // load Q8 quants
  5444. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  5445. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
// Dot product: the 2 low bits and the high-bit correction are multiplied with q8 separately, so that
// _mm_maddubs_epi16 (which needs an unsigned first operand) can be used, and the two products are then
// subtracted. The high-bit part already carries the -4 offset: it is 4 if the high bit was not set and 0 if it was set.
  5449. const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
  5450. const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
  5451. const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
  5452. const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
  5453. __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
  5454. __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
  5455. __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
  5456. __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
  5457. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  5458. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  5459. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  5460. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  5461. // multiply with scales
  5462. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  5463. p16_1 = _mm_madd_epi16(scale_1, p16_1);
  5464. p16_2 = _mm_madd_epi16(scale_2, p16_2);
  5465. p16_3 = _mm_madd_epi16(scale_3, p16_3);
  5466. p16_0 = _mm_add_epi32(p16_0, p16_2);
  5467. p16_1 = _mm_add_epi32(p16_1, p16_3);
  5468. __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
  5469. // multiply with block scale and accumulate
  5470. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
  5471. }
  5472. *s = hsum_float_8(acc);
  5473. #elif defined __riscv_v_intrinsic
  5474. uint16_t aux16[2];
  5475. int8_t * scales = (int8_t *)aux16;
  5476. float sumf = 0;
  5477. for (int i = 0; i < nb; ++i) {
  5478. const uint8_t * restrict q3 = x[i].qs;
  5479. const int8_t * restrict q8 = y[i].qs;
  5480. const uint16_t a = *(const uint16_t *)x[i].scales;
  5481. aux16[0] = a & 0x0f0f;
  5482. aux16[1] = (a >> 4) & 0x0f0f;
  5483. for (int j = 0; j < 4; ++j) scales[j] -= 8;
  5484. int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
  5485. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5486. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  5487. // load qh
  5488. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
  5489. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  5490. size_t vl = 16;
  5491. // extend and combine both qh_x1 and qh_x2
  5492. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
  5493. vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5494. vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
  5495. vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
  5496. vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
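// qh_x concatenates the 8 hmask bytes with a copy shifted right by 1, so each 16-lane group can pick the
// high bit of its quant; masking with 0x4 aligns that bit to the +4 position which is OR-ed into the low
// 2 bits below, while the global -4 offset was already folded into isum via the bsums term above.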
  5497. // load Q3
  5498. vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
  5499. vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
  5500. vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
  5501. vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
  5502. vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
  5503. vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
  5504. vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
  5505. vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
  5506. vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
  5507. // load Q8 and take product with Q3
  5508. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  5509. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  5510. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  5511. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  5512. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  5513. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  5514. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  5515. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  5516. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
  5517. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
  5518. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
  5519. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
  5520. sumf += d * isum;
  5521. }
  5522. *s = sumf;
  5523. #elif defined(__POWER9_VECTOR__)
  5524. const vector signed char lowMask = vec_splats((signed char)0x3);
  5525. const vector signed char v1 = vec_splats((signed char)0x1);
  5526. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  5527. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  5528. const vector unsigned char v6 = vec_splats((unsigned char)0x6);
  5529. const vector signed char off = vec_splats((signed char)0x8);
  5530. vector float vsumf0 = vec_splats(0.0f);
  5531. vector float vsumf1 = vec_splats(0.0f);
  5532. vector float vsumf2 = vec_splats(0.0f);
  5533. vector float vsumf3 = vec_splats(0.0f);
  5534. #pragma GCC unroll 2
  5535. for (int i = 0; i < nb; ++i) {
  5536. __builtin_prefetch(x[i].qs, 0, 1);
  5537. __builtin_prefetch(y[i].qs, 0, 1);
  5538. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  5539. vector float vyd = vec_splats(y[i].d);
  5540. vector float vd = vec_mul(vxd, vyd);
  5541. uint16_t aux16[2];
  5542. int8_t * scales = (int8_t *)aux16;
  5543. const uint16_t a = *(const uint16_t *)x[i].scales;
  5544. aux16[0] = a & 0x0f0f;
  5545. aux16[1] = (a >> 4) & 0x0f0f;
  5546. vector signed char vscales = (vector signed char)vec_xl_len(scales, 8);
  5547. vector signed char qxhs0 = (vector signed char)vec_xl_len(x[i].hmask, 8);
  5548. qxhs0 = vec_or(qxhs0, vec_sr(vec_sld(qxhs0, qxhs0, 8), (vector unsigned char)v1));
  5549. vscales = vec_sub(vscales, off);
  5550. vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs);
  5551. vector signed char qxs00 = vec_and(qxs0, lowMask);
  5552. vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask);
  5553. vector signed char qxs10 = vec_and(vec_sr(qxs0, v4), lowMask);
  5554. vector signed char qxs11 = vec_and(vec_sr(qxs0, v6), lowMask);
  5555. //the 3rd bit
  5556. vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2);
  5557. vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2);
  5558. vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v4)), v2);
  5559. vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v6)), v2);
  5560. qxhs0 = vec_sr(qxhs0, v4);
  5561. vector signed char q3x00 = vec_sub(qxs00, qxh00);
  5562. vector signed char q3x01 = vec_sub(qxs01, qxh01);
  5563. vector signed char q3x10 = vec_sub(qxs10, qxh02);
  5564. vector signed char q3x11 = vec_sub(qxs11, qxh03);
  5565. vector signed char q8y00 = vec_xl( 0, y[i].qs);
  5566. vector signed char q8y01 = vec_xl( 16, y[i].qs);
  5567. vector signed char q8y10 = vec_xl( 32, y[i].qs);
  5568. vector signed char q8y11 = vec_xl( 48, y[i].qs);
  5569. vector signed short vscales_h = vec_unpackh(vscales);
  5570. vector signed short vs0 = vec_splat(vscales_h, 0);
  5571. vector signed short vs1 = vec_splat(vscales_h, 1);
  5572. vector signed short vs2 = vec_splat(vscales_h, 2);
  5573. vector signed short vs3 = vec_splat(vscales_h, 3);
  5574. vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00));
  5575. vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10));
  5576. vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01));
  5577. vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11));
  5578. vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0));
  5579. vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1));
  5580. vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2));
  5581. vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3));
  5582. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  5583. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  5584. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  5585. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  5586. }
  5587. vsumf0 = vec_add(vsumf0, vsumf2);
  5588. vsumf1 = vec_add(vsumf1, vsumf3);
  5589. vsumf0 = vec_add(vsumf0, vsumf1);
  5590. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  5591. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  5592. *s = vec_extract(vsumf0, 0);
  5593. #else
  5594. int8_t aux8[QK_K];
  5595. int16_t aux16[8];
  5596. float sums [8];
  5597. int32_t aux32[8];
  5598. int32_t scales[4];
  5599. memset(sums, 0, 8*sizeof(float));
  5600. float sumf = 0;
  5601. for (int i = 0; i < nb; ++i) {
  5602. const uint8_t * restrict q3 = x[i].qs;
  5603. const uint8_t * restrict hm = x[i].hmask;
  5604. const int8_t * restrict q8 = y[i].qs;
  5605. int8_t * restrict a = aux8;
  5606. for (int l = 0; l < 8; ++l) {
  5607. a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
  5608. a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
  5609. a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
  5610. a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
  5611. a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
  5612. a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
  5613. a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
  5614. a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
  5615. }
  5616. scales[0] = (x[i].scales[0] & 0xF) - 8;
  5617. scales[1] = (x[i].scales[0] >> 4) - 8;
  5618. scales[2] = (x[i].scales[1] & 0xF) - 8;
  5619. scales[3] = (x[i].scales[1] >> 4) - 8;
  5620. memset(aux32, 0, 8*sizeof(int32_t));
  5621. for (int j = 0; j < QK_K/16; ++j) {
  5622. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5623. q8 += 8; a += 8;
  5624. for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
  5625. q8 += 8; a += 8;
  5626. for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
  5627. }
  5628. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  5629. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  5630. }
  5631. for (int l = 0; l < 8; ++l) sumf += sums[l];
  5632. *s = sumf;
  5633. #endif
  5634. }
  5635. #endif
  5636. #if QK_K == 256
  5637. void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  5638. assert(n % QK_K == 0);
  5639. assert(nrc == 1);
  5640. UNUSED(nrc);
  5641. UNUSED(bx);
  5642. UNUSED(by);
  5643. UNUSED(bs);
  5644. const block_q4_K * restrict x = vx;
  5645. const block_q8_K * restrict y = vy;
  5646. const int nb = n / QK_K;
  5647. static const uint32_t kmask1 = 0x3f3f3f3f;
  5648. static const uint32_t kmask2 = 0x0f0f0f0f;
  5649. static const uint32_t kmask3 = 0x03030303;
  5650. uint32_t utmp[4];
  5651. #ifdef __ARM_NEON
  5652. const uint8x16_t m4b = vdupq_n_u8(0xf);
  5653. const int32x4_t mzero = vdupq_n_s32(0);
  5654. ggml_int8x16x2_t q4bytes;
  5655. ggml_int8x16x2_t q8bytes;
  5656. float sumf = 0;
  5657. for (int i = 0; i < nb; ++i) {
  5658. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5659. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5660. const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
  5661. memcpy(utmp, x[i].scales, 12);
  5662. uint32x2_t mins8 = { 0 };
  5663. mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
  5664. mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
  5665. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5666. utmp[0] &= kmask1;
  5667. const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
  5668. const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
  5669. vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
  5670. sumf -= dmin * vaddvq_s32(prod);
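// Q4_K reconstructs each sub-block as d*scale*q - dmin*min. The second term does not depend on the
// individual quant products, so it is handled once per super-block: q8sums pairs the 16 bsums entries into
// per-32 sums of q8, their dot product with the 8 mins is scaled by dmin and subtracted, and the loop below
// only needs the d*scale*q part.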
  5671. const uint8_t * scales = (const uint8_t *)utmp;
  5672. const uint8_t * restrict q4 = x[i].qs;
  5673. const int8_t * restrict q8 = y[i].qs;
  5674. int32_t sumi1 = 0;
  5675. int32_t sumi2 = 0;
  5676. for (int j = 0; j < QK_K/64; ++j) {
  5677. const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
  5678. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  5679. q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
  5680. q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
  5681. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  5682. sumi1 += vaddvq_s32(p1) * scales[2*j+0];
  5683. q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
  5684. q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
  5685. q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
  5686. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  5687. sumi2 += vaddvq_s32(p2) * scales[2*j+1];
  5688. }
  5689. sumf += d * (sumi1 + sumi2);
  5690. }
  5691. *s = sumf;
  5692. #elif defined __AVX2__
  5693. const __m256i m4 = _mm256_set1_epi8(0xF);
  5694. __m256 acc = _mm256_setzero_ps();
  5695. __m128 acc_m = _mm_setzero_ps();
  5696. for (int i = 0; i < nb; ++i) {
  5697. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5698. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5699. memcpy(utmp, x[i].scales, 12);
  5700. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5701. const uint32_t uaux = utmp[1] & kmask1;
  5702. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5703. utmp[2] = uaux;
  5704. utmp[0] &= kmask1;
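// utmp now holds the eight 6-bit sub-block scales in its first 8 bytes and the eight 6-bit mins in the next
// 8 (Q4_K packs both into the 12 scale bytes); mins_and_scales widens them to 16-bit below, and the min
// part is again folded in through bsums rather than in the inner loop.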
  5705. const uint8_t * restrict q4 = x[i].qs;
  5706. const int8_t * restrict q8 = y[i].qs;
  5707. const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
  5708. const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
  5709. const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
  5710. const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
  5711. acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
  5712. const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
  5713. const __m256i scales = MM256_SET_M128I(sc128, sc128);
  5714. __m256i sumi = _mm256_setzero_si256();
  5715. for (int j = 0; j < QK_K/64; ++j) {
  5716. const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
  5717. const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
  5718. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  5719. const __m256i q4l = _mm256_and_si256(q4bits, m4);
  5720. const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
  5721. const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5722. __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
  5723. p16l = _mm256_madd_epi16(scale_l, p16l);
  5724. const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  5725. __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
  5726. p16h = _mm256_madd_epi16(scale_h, p16h);
  5727. const __m256i sumj = _mm256_add_epi32(p16l, p16h);
  5728. sumi = _mm256_add_epi32(sumi, sumj);
  5729. }
  5730. __m256 vd = _mm256_set1_ps(d);
  5731. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
  5732. }
  5733. acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
  5734. acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
  5735. *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
  5736. #elif defined __AVX__
  5737. const __m128i m4 = _mm_set1_epi8(0xF);
  5738. const __m128i m2 = _mm_set1_epi8(0x2);
  5739. __m256 acc = _mm256_setzero_ps();
  5740. __m128 acc_m = _mm_setzero_ps();
  5741. for (int i = 0; i < nb; ++i) {
  5742. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5743. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5744. const uint8_t * restrict q4 = x[i].qs;
  5745. const int8_t * restrict q8 = y[i].qs;
  5746. memcpy(utmp, x[i].scales, 12);
  5747. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5748. const uint32_t uaux = utmp[1] & kmask1;
  5749. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5750. utmp[2] = uaux;
  5751. utmp[0] &= kmask1;
  5752. const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
  5753. const __m128i scales = _mm_cvtepu8_epi16(utmps);
  5754. const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
  5755. const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
  5756. const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
  5757. const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
  5758. const __m128i prod = _mm_madd_epi16(mins, q8s);
  5759. acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
  5760. __m128i sumi_0 = _mm_setzero_si128();
  5761. __m128i sumi_1 = _mm_setzero_si128();
  5762. __m128i shuffle = _mm_set1_epi16(0x0100);
  5763. for (int j = 0; j < QK_K/64; ++j) {
  5764. const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
  5765. shuffle = _mm_add_epi16(shuffle, m2);
  5766. const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
  5767. shuffle = _mm_add_epi16(shuffle, m2);
  5768. __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  5769. const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
  5770. const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
  5771. q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  5772. const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
  5773. const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
  5774. const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5775. __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
  5776. p16l = _mm_madd_epi16(scale_l, p16l);
  5777. sumi_0 = _mm_add_epi32(sumi_0, p16l);
  5778. const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5779. p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
  5780. p16l = _mm_madd_epi16(scale_l, p16l);
  5781. sumi_1 = _mm_add_epi32(sumi_1, p16l);
  5782. const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5783. __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
  5784. p16h = _mm_madd_epi16(scale_h, p16h);
  5785. sumi_0 = _mm_add_epi32(sumi_0, p16h);
  5786. const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  5787. p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
  5788. p16h = _mm_madd_epi16(scale_h, p16h);
  5789. sumi_1 = _mm_add_epi32(sumi_1, p16h);
  5790. }
  5791. __m256 vd = _mm256_set1_ps(d);
  5792. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  5793. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
  5794. }
  5795. acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
  5796. acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
  5797. *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
  5798. #elif defined __riscv_v_intrinsic
  5799. const uint8_t * scales = (const uint8_t*)&utmp[0];
  5800. const uint8_t * mins = (const uint8_t*)&utmp[2];
  5801. float sumf = 0;
  5802. for (int i = 0; i < nb; ++i) {
  5803. size_t vl = 8;
  5804. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  5805. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  5806. vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
  5807. vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
  5808. vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
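// The two strided loads pick up the even and odd bsums entries (stride 4 bytes = every other int16), and
// their sum gives the per-32 q8 sums that the mins are dotted against, mirroring vpaddq in the NEON path.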
  5809. memcpy(utmp, x[i].scales, 12);
  5810. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5811. const uint32_t uaux = utmp[1] & kmask1;
  5812. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5813. utmp[2] = uaux;
  5814. utmp[0] &= kmask1;
  5815. vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
  5816. vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
  5817. vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
  5818. vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  5819. sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
  5820. const uint8_t * restrict q4 = x[i].qs;
  5821. const int8_t * restrict q8 = y[i].qs;
  5822. vl = 32;
  5823. int32_t sum_1 = 0;
  5824. int32_t sum_2 = 0;
  5825. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
  5826. for (int j = 0; j < QK_K/64; ++j) {
  5827. // load Q4
  5828. vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
  5829. // load Q8 and multiply it with lower Q4 nibble
  5830. vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
  5831. vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
  5832. vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
  5833. vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
  5834. sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
  5835. // load Q8 and multiply it with upper Q4 nibble
  5836. vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
  5837. vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
  5838. vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
  5839. vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
  5840. sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
  5841. q4 += 32; q8 += 64;
  5842. }
  5843. sumf += d*(sum_1 + sum_2);
  5844. }
  5845. *s = sumf;
  5846. #elif defined(__POWER9_VECTOR__)
  5847. const vector signed char lowMask = vec_splats((signed char)0xF);
  5848. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  5849. vector float vsumf0 = vec_splats(0.0f);
  5850. vector float vsumf1 = vec_splats(0.0f);
  5851. vector float vsumf2 = vec_splats(0.0f);
  5852. vector float vsumf3 = vec_splats(0.0f);
  5853. for (int i = 0; i < nb; ++i) {
  5854. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  5855. vector float vyd = vec_splats(y[i].d);
  5856. vector float vd = vec_mul(vxd, vyd);
  5857. vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
  5858. vector float vdmin = vec_mul(vxmin, vyd);
  5859. vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
  5860. vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
  5861. memcpy(utmp, x[i].scales, 12);
  5862. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5863. const uint32_t uaux = utmp[1] & kmask1;
  5864. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5865. utmp[2] = uaux;
  5866. utmp[0] &= kmask1;
  5867. vector signed char utmps = (vector signed char)vec_xl( 0, utmp);
  5868. vector signed short vscales = vec_unpackh(utmps);
  5869. vector signed short q4xmins = vec_unpackl(utmps);
  5870. vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins);
  5871. vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins);
  5872. vector signed int prod0 = vec_mule(q4xmins0, q8ysums0);
  5873. vector signed int prod1 = vec_mule(q4xmins1, q8ysums1);
  5874. vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0);
  5875. vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1);
  5876. vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
  5877. vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
  5878. vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
  5879. vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
  5880. vector signed int vsumi0 = vec_splats((int32_t)0);
  5881. vector signed int vsumi1 = vec_splats((int32_t)0);
  5882. vector signed int vsumi2 = vec_splats((int32_t)0);
  5883. vector signed int vsumi3 = vec_splats((int32_t)0);
  5884. vector signed int vsumi4 = vec_splats((int32_t)0);
  5885. vector signed int vsumi5 = vec_splats((int32_t)0);
  5886. vector signed int vsumi6 = vec_splats((int32_t)0);
  5887. vector signed int vsumi7 = vec_splats((int32_t)0);
  5888. const uint8_t * restrict q4 = x[i].qs;
  5889. const int8_t * restrict q8 = y[i].qs;
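// each iteration consumes 64 q4 bytes (128 quants, i.e. four 32-quant sub-blocks) and the matching 128 q8 values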
  5890. for (int j = 0; j < QK_K/64; j+=2) {
  5891. __builtin_prefetch(q4, 0, 1);
  5892. __builtin_prefetch(q8, 0, 1);
  5893. vector signed char qxs0 = (vector signed char)vec_xl( 0, q4);
  5894. vector signed char qxs1 = (vector signed char)vec_xl(16, q4);
  5895. vector signed char qxs2 = (vector signed char)vec_xl(32, q4);
  5896. vector signed char qxs3 = (vector signed char)vec_xl(48, q4);
  5897. q4 += 64;
  5898. vector signed char q4x00 = vec_and(qxs0, lowMask);
  5899. vector signed char q4x01 = vec_sr(qxs0, v4);
  5900. vector signed char q4x10 = vec_and(qxs1, lowMask);
  5901. vector signed char q4x11 = vec_sr(qxs1, v4);
  5902. vector signed char q4x20 = vec_and(qxs2, lowMask);
  5903. vector signed char q4x21 = vec_sr(qxs2, v4);
  5904. vector signed char q4x30 = vec_and(qxs3, lowMask);
  5905. vector signed char q4x31 = vec_sr(qxs3, v4);
  5906. vector signed char q8y00 = vec_xl( 0, q8);
  5907. vector signed char q8y10 = vec_xl( 16, q8);
  5908. vector signed char q8y01 = vec_xl( 32, q8);
  5909. vector signed char q8y11 = vec_xl( 48, q8);
  5910. vector signed char q8y20 = vec_xl( 64, q8);
  5911. vector signed char q8y30 = vec_xl( 80, q8);
  5912. vector signed char q8y21 = vec_xl( 96, q8);
  5913. vector signed char q8y31 = vec_xl(112, q8);
  5914. q8 += 128;
  5915. vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00));
  5916. vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01));
  5917. vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10));
  5918. vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11));
  5919. vector signed short qv20 = vec_add(vec_mule(q4x20, q8y20), vec_mulo(q4x20, q8y20));
  5920. vector signed short qv21 = vec_add(vec_mule(q4x21, q8y21), vec_mulo(q4x21, q8y21));
  5921. vector signed short qv30 = vec_add(vec_mule(q4x30, q8y30), vec_mulo(q4x30, q8y30));
  5922. vector signed short qv31 = vec_add(vec_mule(q4x31, q8y31), vec_mulo(q4x31, q8y31));
  5923. vector signed short vs0 = vec_splat(vscales, 0);
  5924. vector signed short vs1 = vec_splat(vscales, 1);
  5925. vector signed short vs2 = vec_splat(vscales, 2);
  5926. vector signed short vs3 = vec_splat(vscales, 3);
  5927. vscales = vec_sld(vscales, vscales, 8);
  5928. qv00 = vec_add(qv00, qv10);
  5929. qv10 = vec_add(qv01, qv11);
  5930. qv20 = vec_add(qv20, qv30);
  5931. qv30 = vec_add(qv21, qv31);
  5932. vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0);
  5933. vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1);
  5934. vsumi2 = vec_add(vec_mule(qv10, vs1), vsumi2);
  5935. vsumi3 = vec_add(vec_mulo(qv10, vs1), vsumi3);
  5936. vsumi4 = vec_add(vec_mule(qv20, vs2), vsumi4);
  5937. vsumi5 = vec_add(vec_mulo(qv20, vs2), vsumi5);
  5938. vsumi6 = vec_add(vec_mule(qv30, vs3), vsumi6);
  5939. vsumi7 = vec_add(vec_mulo(qv30, vs3), vsumi7);
  5940. }
  5941. vsumi0 = vec_add(vsumi0, vsumi4);
  5942. vsumi1 = vec_add(vsumi1, vsumi5);
  5943. vsumi2 = vec_add(vsumi2, vsumi6);
  5944. vsumi3 = vec_add(vsumi3, vsumi7);
  5945. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  5946. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  5947. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  5948. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  5949. }
  5950. vsumf0 = vec_add(vsumf0, vsumf2);
  5951. vsumf1 = vec_add(vsumf1, vsumf3);
  5952. vsumf0 = vec_add(vsumf0, vsumf1);
  5953. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  5954. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  5955. *s = vec_extract(vsumf0, 0);
  5956. #else
  5957. const uint8_t * scales = (const uint8_t*)&utmp[0];
  5958. const uint8_t * mins = (const uint8_t*)&utmp[2];
  5959. int8_t aux8[QK_K];
  5960. int16_t aux16[8];
  5961. float sums [8];
  5962. int32_t aux32[8];
  5963. memset(sums, 0, 8*sizeof(float));
  5964. float sumf = 0;
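// reference path: expand the whole super-block of 4-bit values into aux8 first,
// then accumulate 32 values at a time with their sub-block scale; the mins correction comes from bsums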
  5965. for (int i = 0; i < nb; ++i) {
  5966. const uint8_t * restrict q4 = x[i].qs;
  5967. const int8_t * restrict q8 = y[i].qs;
  5968. memset(aux32, 0, 8*sizeof(int32_t));
  5969. int8_t * restrict a = aux8;
  5970. for (int j = 0; j < QK_K/64; ++j) {
  5971. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
  5972. a += 32;
  5973. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
  5974. a += 32; q4 += 32;
  5975. }
  5976. memcpy(utmp, x[i].scales, 12);
  5977. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  5978. const uint32_t uaux = utmp[1] & kmask1;
  5979. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  5980. utmp[2] = uaux;
  5981. utmp[0] &= kmask1;
  5982. int sumi = 0;
  5983. for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
  5984. a = aux8;
  5985. int is = 0;
  5986. for (int j = 0; j < QK_K/32; ++j) {
  5987. int32_t scale = scales[is++];
  5988. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5989. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5990. q8 += 8; a += 8;
  5991. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5992. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5993. q8 += 8; a += 8;
  5994. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5995. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5996. q8 += 8; a += 8;
  5997. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  5998. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  5999. q8 += 8; a += 8;
  6000. }
  6001. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6002. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  6003. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  6004. sumf -= dmin * sumi;
  6005. }
  6006. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6007. *s = sumf;
  6008. #endif
  6009. }
  6010. #else
  6011. void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6012. assert(n % QK_K == 0);
  6013. assert(nrc == 1);
  6014. UNUSED(nrc);
  6015. UNUSED(bx);
  6016. UNUSED(by);
  6017. UNUSED(bs);
  6018. const block_q4_K * restrict x = vx;
  6019. const block_q8_K * restrict y = vy;
  6020. const int nb = n / QK_K;
  6021. #ifdef __ARM_NEON
  6022. const uint8x16_t m4b = vdupq_n_u8(0xf);
  6023. const int32x4_t mzero = vdupq_n_s32(0);
  6024. float sumf = 0;
  6025. ggml_int8x16x2_t q4bytes;
  6026. ggml_int8x16x4_t q8bytes;
  6027. float sum_mins = 0.f;
  6028. uint16_t aux16[2];
  6029. const uint8_t * restrict scales = (const uint8_t *)aux16;
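// for QK_K == 64 the block stores two 4-bit scales and two 4-bit mins packed in x[i].scales;
// the masking below leaves the scales in scales[0..1] and the mins in scales[2..3]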
  6030. for (int i = 0; i < nb; ++i) {
  6031. const uint8_t * restrict q4 = x[i].qs;
  6032. const int8_t * restrict q8 = y[i].qs;
  6033. const uint16_t * restrict a = (const uint16_t *)x[i].scales;
  6034. aux16[0] = a[0] & 0x0f0f;
  6035. aux16[1] = (a[0] >> 4) & 0x0f0f;
  6036. const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
  6037. sum_mins += y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * summi;
  6038. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  6039. const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);
  6040. q8bytes = ggml_vld1q_s8_x4(q8);
  6041. q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
  6042. q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
  6043. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
  6044. const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
  6045. q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
  6046. q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
  6047. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
  6048. const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
  6049. sumf += d * (sumi1 + sumi2);
  6050. }
  6051. *s = sumf - sum_mins;
  6052. #elif defined __AVX2__
  6053. const __m256i m4 = _mm256_set1_epi8(0xF);
  6054. __m256 acc = _mm256_setzero_ps();
  6055. float summs = 0;
  6056. uint16_t aux16[2];
  6057. const uint8_t * scales = (const uint8_t *)aux16;
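// QK_K == 64 layout: x[i].d[0] is the super-block scale, x[i].d[1] the super-block min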
  6058. for (int i = 0; i < nb; ++i) {
  6059. const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
  6060. const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
  6061. const __m256 vd = _mm256_set1_ps(d);
  6062. const uint16_t * a = (const uint16_t *)x[i].scales;
  6063. aux16[0] = a[0] & 0x0f0f;
  6064. aux16[1] = (a[0] >> 4) & 0x0f0f;
  6065. summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6066. const uint8_t * restrict q4 = x[i].qs;
  6067. const int8_t * restrict q8 = y[i].qs;
  6068. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
  6069. const __m256i q4l = _mm256_and_si256(q4bits, m4);
  6070. const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
  6071. const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6072. const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
  6073. const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
  6074. const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
  6075. const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
  6076. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
  6077. const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
  6078. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
  6079. }
  6080. *s = hsum_float_8(acc) - summs;
  6081. #elif defined __AVX__
  6082. const __m128i m4 = _mm_set1_epi8(0xF);
  6083. __m256 acc = _mm256_setzero_ps();
  6084. float summs = 0;
  6085. uint16_t aux16[2];
  6086. const uint8_t * scales = (const uint8_t *)aux16;
  6087. for (int i = 0; i < nb; ++i) {
  6088. const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
  6089. const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
  6090. const __m256 vd = _mm256_set1_ps(d);
  6091. const uint16_t * a = (const uint16_t *)x[i].scales;
  6092. aux16[0] = a[0] & 0x0f0f;
  6093. aux16[1] = (a[0] >> 4) & 0x0f0f;
  6094. summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6095. const uint8_t * restrict q4 = x[i].qs;
  6096. const int8_t * restrict q8 = y[i].qs;
  6097. const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
  6098. const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
  6099. const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
  6100. const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
  6101. const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
  6102. const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
  6103. const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
  6104. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6105. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6106. const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
  6107. const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
  6108. const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
  6109. const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
  6110. const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
  6111. const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
  6112. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
  6113. const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
  6114. const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
  6115. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
  6116. }
  6117. *s = hsum_float_8(acc) - summs;
  6118. #elif defined __riscv_v_intrinsic
  6119. uint16_t s16[2];
  6120. const uint8_t * restrict scales = (const uint8_t *)s16;
  6121. float sumf = 0;
  6122. for (int i = 0; i < nb; ++i) {
  6123. const uint8_t * restrict q4 = x[i].qs;
  6124. const int8_t * restrict q8 = y[i].qs;
  6125. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  6126. s16[0] = b[0] & 0x0f0f;
  6127. s16[1] = (b[0] >> 4) & 0x0f0f;
  6128. sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6129. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  6130. size_t vl = 32;
  6131. vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
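// the whole 64-quant block fits one 32-byte q4 load: low nibbles go against q8[0..31] with scales[0],
// high nibbles against q8[32..63] with scales[1]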
  6132. // load Q4
  6133. vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
  6134. // load Q8 and multiply it with lower Q4 nibble
  6135. vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
  6136. vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
  6137. vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);
  6138. sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);
  6139. // load Q8 and multiply it with upper Q4 nibble
  6140. vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
  6141. vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  6142. vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);
  6143. sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
  6144. }
  6145. *s = sumf;
  6146. #elif defined(__POWER9_VECTOR__)
  6147. const vector signed char lowMask = vec_splats((signed char)0xF);
  6148. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  6149. vector float vsumf0 = vec_splats(0.0f);
  6150. vector float vsumf1 = vec_splats(0.0f);
  6151. vector float vsumf2 = vec_splats(0.0f);
  6152. vector float vsumf3 = vec_splats(0.0f);
  6153. #pragma GCC unroll 2
  6154. for (int i = 0; i < nb; ++i) {
  6155. __builtin_prefetch(x[i].qs, 0, 1);
  6156. __builtin_prefetch(y[i].qs, 0, 1);
  6157. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d[1]));
  6158. vector float vyd = vec_splats(y[i].d);
6159. vector float vd = vec_mul(vxd, vyd);
  6160. uint16_t s16[2];
  6161. const uint8_t * scales = (const uint8_t *)s16;
  6162. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  6163. s16[0] = b[0] & 0x0f0f;
  6164. s16[1] = (b[0] >> 4) & 0x0f0f;
  6165. vector signed char utmps = (vector signed char)vec_xl_len(scales, 4);
  6166. vector signed short vscales = (vector signed short)vec_unpackh(utmps);
  6167. vector signed short q4xmins0 = vec_mergeh(vscales, vscales);
  6168. q4xmins0 = vec_sld(q4xmins0, q4xmins0, 8);
  6169. vector signed short q8ysums0 = vec_xl_len((const int16_t *)(y[i].bsums), 8);
  6170. vector signed int prod0 = vec_mule(q4xmins0, q8ysums0);
  6171. vector signed int prod1 = vec_mulo(q4xmins0, q8ysums0);
  6172. vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vd, vsumf0);
  6173. vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vd, vsumf1);
  6174. vd = vec_mul(vyd, vec_splats(GGML_FP16_TO_FP32(x[i].d[0])));
  6175. vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs);
  6176. vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].qs);
  6177. vector signed char q4x00 = vec_and(qxs0, lowMask);
  6178. vector signed char q4x01 = vec_sr(qxs0, v4);
  6179. vector signed char q4x10 = vec_and(qxs1, lowMask);
  6180. vector signed char q4x11 = vec_sr(qxs1, v4);
  6181. vector signed char q8y00 = vec_xl( 0, y[i].qs);
  6182. vector signed char q8y10 = vec_xl(16, y[i].qs);
  6183. vector signed char q8y01 = vec_xl(32, y[i].qs);
  6184. vector signed char q8y11 = vec_xl(48, y[i].qs);
  6185. vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00));
  6186. vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01));
  6187. vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10));
  6188. vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11));
  6189. vector signed short vs0 = vec_splat(vscales, 0);
  6190. vector signed short vs1 = vec_splat(vscales, 1);
  6191. vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0));
  6192. vector signed int vsumi1 = vec_add(vec_mule(qv10, vs0), vec_mulo(qv10, vs0));
  6193. vector signed int vsumi2 = vec_add(vec_mule(qv01, vs1), vec_mulo(qv01, vs1));
  6194. vector signed int vsumi3 = vec_add(vec_mule(qv11, vs1), vec_mulo(qv11, vs1));
  6195. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  6196. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  6197. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  6198. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  6199. }
  6200. vsumf0 = vec_add(vsumf0, vsumf2);
  6201. vsumf1 = vec_add(vsumf1, vsumf3);
  6202. vsumf0 = vec_add(vsumf0, vsumf1);
  6203. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  6204. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  6205. *s = vec_extract(vsumf0, 0);
  6206. #else
  6207. uint8_t aux8[QK_K];
  6208. int16_t aux16[16];
  6209. float sums [8];
  6210. memset(sums, 0, 8*sizeof(float));
  6211. uint16_t s16[2];
  6212. const uint8_t * restrict scales = (const uint8_t *)s16;
  6213. float sumf = 0;
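// scalar reference for QK_K == 64: scales[0..1] scale the two 32-value halves,
// scales[2..3] act as mins and are folded in through bsums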
  6214. for (int i = 0; i < nb; ++i) {
  6215. const uint8_t * restrict q4 = x[i].qs;
  6216. const int8_t * restrict q8 = y[i].qs;
  6217. uint8_t * restrict a = aux8;
  6218. for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
  6219. for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
  6220. const uint16_t * restrict b = (const uint16_t *)x[i].scales;
  6221. s16[0] = b[0] & 0x0f0f;
  6222. s16[1] = (b[0] >> 4) & 0x0f0f;
  6223. sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
  6224. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
  6225. for (int j = 0; j < QK_K/32; ++j) {
  6226. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  6227. q8 += 16; a += 16;
  6228. for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
  6229. q8 += 16; a += 16;
  6230. const float dl = d * scales[j];
  6231. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
  6232. }
  6233. }
  6234. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6235. *s = sumf;
  6236. #endif
  6237. }
  6238. #endif
  6239. #if QK_K == 256
  6240. void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6241. assert(n % QK_K == 0);
  6242. assert(nrc == 1);
  6243. UNUSED(nrc);
  6244. UNUSED(bx);
  6245. UNUSED(by);
  6246. UNUSED(bs);
  6247. const block_q5_K * restrict x = vx;
  6248. const block_q8_K * restrict y = vy;
  6249. const int nb = n / QK_K;
  6250. static const uint32_t kmask1 = 0x3f3f3f3f;
  6251. static const uint32_t kmask2 = 0x0f0f0f0f;
  6252. static const uint32_t kmask3 = 0x03030303;
  6253. uint32_t utmp[4];
  6254. #ifdef __ARM_NEON
  6255. const uint8x16_t m4b = vdupq_n_u8(0xf);
  6256. const uint8x16_t mone = vdupq_n_u8(1);
  6257. const uint8x16_t mtwo = vdupq_n_u8(2);
  6258. const int32x4_t mzero = vdupq_n_s32(0);
  6259. ggml_int8x16x4_t q5bytes;
  6260. float sumf = 0;
  6261. for (int i = 0; i < nb; ++i) {
  6262. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6263. const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6264. const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
  6265. memcpy(utmp, x[i].scales, 12);
  6266. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6267. const uint32_t uaux = utmp[1] & kmask1;
  6268. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6269. utmp[2] = uaux;
  6270. utmp[0] &= kmask1;
  6271. const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
  6272. const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
  6273. const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
  6274. vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
  6275. int32_t sumi_mins = vaddvq_s32(prod);
  6276. const uint8_t * scales = (const uint8_t *)utmp;
  6277. const uint8_t * restrict q5 = x[i].qs;
  6278. const uint8_t * restrict qh = x[i].qh;
  6279. const int8_t * restrict q8 = y[i].qs;
  6280. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
  6281. ggml_uint8x16x4_t q5h;
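// qh supplies the 5th (high) bit of each quant; q5h shifts it up to bit 4 so it can be OR-ed onto the nibbles below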
  6282. int32_t sumi = 0;
  6283. for (int j = 0; j < QK_K/64; ++j) {
  6284. const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
  6285. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6286. q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
  6287. q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
  6288. q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
  6289. q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
  6290. qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
  6291. qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
  6292. q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
  6293. q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
  6294. q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
  6295. q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
  6296. sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
  6297. sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
  6298. }
  6299. sumf += d * sumi - dmin * sumi_mins;
  6300. }
  6301. *s = sumf;
  6302. #elif defined __AVX2__
  6303. const __m256i m4 = _mm256_set1_epi8(0xF);
  6304. const __m128i mzero = _mm_setzero_si128();
  6305. const __m256i mone = _mm256_set1_epi8(1);
  6306. __m256 acc = _mm256_setzero_ps();
  6307. float summs = 0.f;
  6308. for (int i = 0; i < nb; ++i) {
  6309. const uint8_t * restrict q5 = x[i].qs;
  6310. const int8_t * restrict q8 = y[i].qs;
  6311. #if QK_K == 256
  6312. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6313. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6314. memcpy(utmp, x[i].scales, 12);
  6315. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6316. const uint32_t uaux = utmp[1] & kmask1;
  6317. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6318. utmp[2] = uaux;
  6319. utmp[0] &= kmask1;
  6320. #else
  6321. // TODO
  6322. const float d = 0, dmin = 0;
  6323. #endif
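// note: the #else branch above is unreachable here, since this function body is compiled only when QK_K == 256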
  6324. const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
  6325. const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
  6326. const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
  6327. const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
  6328. const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
  6329. summs += dmin * _mm_extract_epi32(hsum, 0);
  6330. const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
  6331. const __m256i scales = MM256_SET_M128I(sc128, sc128);
  6332. const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
  6333. __m256i hmask = mone;
  6334. __m256i sumi = _mm256_setzero_si256();
  6335. int bit = 0;
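// hbits packs one high bit per quant; hmask/bit step through the bit planes, one plane per 32-quant sub-block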
  6336. for (int j = 0; j < QK_K/64; ++j) {
  6337. const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
  6338. const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
  6339. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
  6340. const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
  6341. const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
  6342. const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
  6343. hmask = _mm256_slli_epi16(hmask, 1);
  6344. const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
  6345. const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
  6346. const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
  6347. hmask = _mm256_slli_epi16(hmask, 1);
  6348. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6349. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  6350. __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
  6351. __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
  6352. p16_0 = _mm256_madd_epi16(scale_0, p16_0);
  6353. p16_1 = _mm256_madd_epi16(scale_1, p16_1);
  6354. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  6355. }
  6356. __m256 vd = _mm256_set1_ps(d);
  6357. acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
  6358. }
  6359. *s = hsum_float_8(acc) + summs;
  6360. #elif defined __AVX__
  6361. const __m128i m4 = _mm_set1_epi8(0xF);
  6362. const __m128i mzero = _mm_setzero_si128();
  6363. const __m128i mone = _mm_set1_epi8(1);
  6364. const __m128i m2 = _mm_set1_epi8(2);
  6365. __m256 acc = _mm256_setzero_ps();
  6366. float summs = 0.f;
  6367. for (int i = 0; i < nb; ++i) {
  6368. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6369. const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
  6370. const uint8_t * restrict q5 = x[i].qs;
  6371. const int8_t * restrict q8 = y[i].qs;
  6372. memcpy(utmp, x[i].scales, 12);
  6373. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6374. const uint32_t uaux = utmp[1] & kmask1;
  6375. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6376. utmp[2] = uaux;
  6377. utmp[0] &= kmask1;
  6378. const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
  6379. const __m128i scales = _mm_cvtepu8_epi16(utmps);
  6380. const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
  6381. const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
  6382. const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
  6383. const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
  6384. const __m128i prod = _mm_madd_epi16(mins, q8s);
  6385. const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
  6386. summs += dmin * _mm_extract_epi32(hsum, 0);
  6387. const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
  6388. const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
  6389. __m128i hmask = mone;
  6390. __m128i sumi_0 = _mm_setzero_si128();
  6391. __m128i sumi_1 = _mm_setzero_si128();
  6392. int bit = 0;
  6393. __m128i shuffle = _mm_set1_epi16(0x0100);
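// shuffle broadcasts one 16-bit scale across the register; adding m2 advances it to the next scale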
  6394. for (int j = 0; j < QK_K/64; ++j) {
  6395. const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
  6396. shuffle = _mm_add_epi16(shuffle, m2);
  6397. const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
  6398. shuffle = _mm_add_epi16(shuffle, m2);
  6399. const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
  6400. const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
  6401. __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
  6402. __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
  6403. __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
  6404. __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
  6405. __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
  6406. __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
  6407. hmask = _mm_slli_epi16(hmask, 1);
  6408. __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6409. __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6410. __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
  6411. __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
  6412. p16_0 = _mm_madd_epi16(scale_0, p16_0);
  6413. p16_1 = _mm_madd_epi16(scale_0, p16_1);
  6414. q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
  6415. q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
  6416. q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
  6417. q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
  6418. q5_0 = _mm_add_epi8(q5l_0, q5h_0);
  6419. q5_1 = _mm_add_epi8(q5l_1, q5h_1);
  6420. hmask = _mm_slli_epi16(hmask, 1);
  6421. q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6422. q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  6423. __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
  6424. __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
  6425. p16_2 = _mm_madd_epi16(scale_1, p16_2);
  6426. p16_3 = _mm_madd_epi16(scale_1, p16_3);
  6427. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  6428. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  6429. }
  6430. __m256 vd = _mm256_set1_ps(d);
  6431. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  6432. acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
  6433. }
  6434. *s = hsum_float_8(acc) + summs;
  6435. #elif defined __riscv_v_intrinsic
  6436. const uint8_t * scales = (const uint8_t*)&utmp[0];
  6437. const uint8_t * mins = (const uint8_t*)&utmp[2];
  6438. float sumf = 0;
  6439. float sums = 0.0;
  6440. size_t vl;
  6441. for (int i = 0; i < nb; ++i) {
  6442. vl = 8;
  6443. const uint8_t * restrict q5 = x[i].qs;
  6444. const uint8_t * restrict hm = x[i].qh;
  6445. const int8_t * restrict q8 = y[i].qs;
  6446. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6447. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  6448. vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
  6449. vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
  6450. vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
  6451. memcpy(utmp, x[i].scales, 12);
  6452. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6453. const uint32_t uaux = utmp[1] & kmask1;
  6454. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6455. utmp[2] = uaux;
  6456. utmp[0] &= kmask1;
  6457. vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
  6458. vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
  6459. vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
  6460. vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
  6461. sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
  6462. vl = 32;
  6463. int32_t aux32 = 0;
  6464. int is = 0;
  6465. uint8_t m = 1;
  6466. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  6467. vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);
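// instead of OR-ing the high bit into bit 4, add 16 under a mask wherever the corresponding qh bit is set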
  6468. for (int j = 0; j < QK_K/64; ++j) {
  6469. // load Q5 and Q8
  6470. vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
  6471. vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
  6472. vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);
  6473. // compute mask for addition
  6474. vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
  6475. vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
  6476. vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
  6477. vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
  6478. m <<= 1;
  6479. vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
  6480. vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
  6481. vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
  6482. vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
  6483. m <<= 1;
  6484. vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
  6485. vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);
  6486. vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
  6487. vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);
  6488. vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
  6489. vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);
  6490. aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
  6491. q5 += 32; q8 += 64;
  6492. }
  6493. vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
  6494. sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
  6495. }
  6496. *s = sumf+sums;
  6497. #elif defined(__POWER9_VECTOR__)
  6498. const vector signed char lowMask = vec_splats((signed char)0xF);
  6499. const vector unsigned char v1 = vec_splats((unsigned char)0x1);
  6500. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  6501. const vector unsigned char v3 = vec_splats((unsigned char)0x3);
  6502. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  6503. vector float vsumf0 = vec_splats(0.0f);
  6504. vector float vsumf1 = vec_splats(0.0f);
  6505. vector float vsumf2 = vec_splats(0.0f);
  6506. vector float vsumf3 = vec_splats(0.0f);
  6507. for (int i = 0; i < nb; ++i) {
  6508. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  6509. vector float vyd = vec_splats(y[i].d);
  6510. vector float vd = vec_mul(vxd, vyd);
  6511. vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
  6512. vector float vdmin = vec_mul(vxmin, vyd);
  6513. memcpy(utmp, x[i].scales, 12);
  6514. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6515. const uint32_t uaux = utmp[1] & kmask1;
  6516. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6517. utmp[2] = uaux;
  6518. utmp[0] &= kmask1;
  6519. vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
  6520. vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
  6521. vector signed char utmps = (vector signed char)vec_xl( 0, utmp);
  6522. vector signed short vscales = vec_unpackh(utmps);
  6523. vector signed short q5xmins = vec_unpackl(utmps);
  6524. vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins);
  6525. vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins);
  6526. vector signed int prod0 = vec_mule(q5xmins0, q8ysums0);
  6527. vector signed int prod1 = vec_mule(q5xmins1, q8ysums1);
  6528. vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0);
  6529. vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1);
  6530. vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
  6531. vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
  6532. vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
  6533. vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
  6534. vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh);
  6535. vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh);
  6536. vector signed int vsumi0 = vec_splats((int32_t)0);
  6537. vector signed int vsumi1 = vec_splats((int32_t)0);
  6538. vector signed int vsumi2 = vec_splats((int32_t)0);
  6539. vector signed int vsumi3 = vec_splats((int32_t)0);
  6540. const uint8_t * restrict q5 = x[i].qs;
  6541. const int8_t * restrict q8 = y[i].qs;
  6542. for (int j = 0; j < QK_K/64; ++j) {
  6543. __builtin_prefetch(q5, 0, 1);
  6544. __builtin_prefetch(q8, 0, 1);
  6545. vector signed char qxs0 = (vector signed char)vec_xl( 0, q5);
  6546. vector signed char qxs1 = (vector signed char)vec_xl(16, q5);
  6547. q5 += 32;
  6548. vector signed char qxs00 = vec_and(qxs0, lowMask);
  6549. vector signed char qxs01 = vec_sr(qxs0, v4);
  6550. vector signed char qxs10 = vec_and(qxs1, lowMask);
  6551. vector signed char qxs11 = vec_sr(qxs1, v4);
  6552. vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4);
  6553. vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3);
  6554. vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4);
  6555. vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3);
  6556. qxhs0 = vec_sr(qxhs0, v2);
  6557. qxhs1 = vec_sr(qxhs1, v2);
  6558. vector signed char q5x00 = vec_or(q5h00, qxs00);
  6559. vector signed char q5x01 = vec_or(q5h01, qxs01);
  6560. vector signed char q5x10 = vec_or(q5h10, qxs10);
  6561. vector signed char q5x11 = vec_or(q5h11, qxs11);
  6562. vector signed char q8y00 = vec_xl( 0, q8);
  6563. vector signed char q8y10 = vec_xl(16, q8);
  6564. vector signed char q8y01 = vec_xl(32, q8);
  6565. vector signed char q8y11 = vec_xl(48, q8);
  6566. q8 += 64;
  6567. vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00));
  6568. vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01));
  6569. vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10));
  6570. vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11));
  6571. vector signed short vs0 = vec_splat(vscales, 0);
  6572. vector signed short vs1 = vec_splat(vscales, 1);
  6573. vscales = vec_sld(vscales, vscales, 12);
  6574. qv00 = vec_add(qv00, qv10);
  6575. qv01 = vec_add(qv01, qv11);
  6576. vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0);
  6577. vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1);
  6578. vsumi2 = vec_add(vec_mule(qv01, vs1), vsumi2);
  6579. vsumi3 = vec_add(vec_mulo(qv01, vs1), vsumi3);
  6580. }
  6581. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  6582. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  6583. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  6584. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  6585. }
  6586. vsumf0 = vec_add(vsumf0, vsumf2);
  6587. vsumf1 = vec_add(vsumf1, vsumf3);
  6588. vsumf0 = vec_add(vsumf0, vsumf1);
  6589. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  6590. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  6591. *s = vec_extract(vsumf0, 0);
  6592. #else
  6593. const uint8_t * scales = (const uint8_t*)&utmp[0];
  6594. const uint8_t * mins = (const uint8_t*)&utmp[2];
  6595. int8_t aux8[QK_K];
  6596. int16_t aux16[8];
  6597. float sums [8];
  6598. int32_t aux32[8];
  6599. memset(sums, 0, 8*sizeof(float));
  6600. float sumf = 0;
  6601. for (int i = 0; i < nb; ++i) {
  6602. const uint8_t * restrict q4 = x[i].qs;
  6603. const uint8_t * restrict hm = x[i].qh;
  6604. const int8_t * restrict q8 = y[i].qs;
  6605. memset(aux32, 0, 8*sizeof(int32_t));
  6606. int8_t * restrict a = aux8;
  6607. uint8_t m = 1;
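// expand each quant to 8 bits: low/high nibble plus 16 whenever its qh bit is set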
  6608. for (int j = 0; j < QK_K/64; ++j) {
  6609. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
  6610. for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
  6611. a += 32; m <<= 1;
  6612. for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
  6613. for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
  6614. a += 32; m <<= 1;
  6615. q4 += 32;
  6616. }
  6617. memcpy(utmp, x[i].scales, 12);
  6618. utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
  6619. const uint32_t uaux = utmp[1] & kmask1;
  6620. utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
  6621. utmp[2] = uaux;
  6622. utmp[0] &= kmask1;
  6623. int sumi = 0;
  6624. for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
  6625. a = aux8;
  6626. int is = 0;
  6627. for (int j = 0; j < QK_K/32; ++j) {
  6628. int32_t scale = scales[is++];
  6629. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6630. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6631. q8 += 8; a += 8;
  6632. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6633. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6634. q8 += 8; a += 8;
  6635. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6636. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6637. q8 += 8; a += 8;
  6638. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  6639. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  6640. q8 += 8; a += 8;
  6641. }
  6642. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  6643. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  6644. const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
  6645. sumf -= dmin * sumi;
  6646. }
  6647. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6648. *s = sumf;
  6649. #endif
  6650. }
  6651. #else
  6652. void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  6653. assert(n % QK_K == 0);
  6654. assert(nrc == 1);
  6655. UNUSED(nrc);
  6656. UNUSED(bx);
  6657. UNUSED(by);
  6658. UNUSED(bs);
  6659. const block_q5_K * restrict x = vx;
  6660. const block_q8_K * restrict y = vy;
  6661. const int nb = n / QK_K;
  6662. #ifdef __ARM_NEON
  6663. const uint8x16_t m4b = vdupq_n_u8(0xf);
  6664. const uint8x16_t mh = vdupq_n_u8(16);
  6665. const int32x4_t mzero = vdupq_n_s32(0);
  6666. ggml_int8x16x4_t q5bytes;
  6667. ggml_uint8x16x4_t q5h;
  6668. float sumf = 0;
  6669. for (int i = 0; i < nb; ++i) {
  6670. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6671. const int8_t * sc = x[i].scales;
  6672. const uint8_t * restrict q5 = x[i].qs;
  6673. const uint8_t * restrict qh = x[i].qh;
  6674. const int8_t * restrict q8 = y[i].qs;
  6675. const uint8x8_t qhbits = vld1_u8(qh);
  6676. const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
  6677. const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  6678. const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
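// vbic yields 16 wherever the high bit is clear, so the subtractions below map the quants to the signed range [-16, 15]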
  6679. q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
  6680. q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
  6681. q5h.val[2] = vbicq_u8(mh, htmp);
  6682. q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
  6683. q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
  6684. q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
  6685. q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
  6686. q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
  6687. int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
  6688. int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
  6689. int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
  6690. int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
  6691. sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
  6692. }
  6693. *s = sumf;
  6694. #elif defined __AVX2__
  6695. const __m256i m4 = _mm256_set1_epi8(0xF);
  6696. const __m256i mone = _mm256_set1_epi8(1);
  6697. __m256 acc = _mm256_setzero_ps();
  6698. for (int i = 0; i < nb; ++i) {
  6699. const uint8_t * restrict q5 = x[i].qs;
  6700. const int8_t * restrict q8 = y[i].qs;
  6701. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6702. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
  6703. const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
  6704. const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
  6705. int64_t aux64;
  6706. memcpy(&aux64, x[i].qh, 8);
  6707. const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
  6708. const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
  6709. const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
  6710. const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
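// q5h_0/q5h_1 are 16 wherever the corresponding qh bit is clear; subtracting the s16 products
// from the p16 products below maps the quants to the signed range [-16, 15]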
  6711. const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
  6712. const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
  6713. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6714. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6715. const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
  6716. const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
  6717. const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
  6718. const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
  6719. const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
  6720. acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
  6721. }
  6722. *s = hsum_float_8(acc);
  6723. #elif defined __AVX__
  6724. const __m128i m4 = _mm_set1_epi8(0xF);
  6725. const __m128i mone = _mm_set1_epi8(1);
  6726. __m256 acc = _mm256_setzero_ps();
  6727. for (int i = 0; i < nb; ++i) {
  6728. const uint8_t * restrict q5 = x[i].qs;
  6729. const int8_t * restrict q8 = y[i].qs;
  6730. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6731. const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
  6732. const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
  6733. const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
  6734. const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
  6735. const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
  6736. int64_t aux64;
  6737. memcpy(&aux64, x[i].qh, 8);
  6738. const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
  6739. const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
  6740. const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
  6741. const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
  6742. const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
  6743. const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
  6744. const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
  6745. const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
  6746. const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
  6747. const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
  6748. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  6749. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  6750. const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
  6751. const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
  6752. const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
  6753. const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
  6754. const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
  6755. const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
  6756. const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
  6757. const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
  6758. const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
  6759. const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
  6760. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
  6761. }
  6762. *s = hsum_float_8(acc);
  6763. #elif defined __riscv_v_intrinsic
  6764. float sumf = 0;
  6765. for (int i = 0; i < nb; ++i) {
  6766. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6767. const int8_t * sc = x[i].scales;
  6768. const uint8_t * restrict q5 = x[i].qs;
  6769. const uint8_t * restrict qh = x[i].qh;
  6770. const int8_t * restrict q8 = y[i].qs;
  6771. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  6772. // load qh
  6773. vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
  6774. vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
  6775. size_t vl = 16;
  6776. // combine both qh_1 and qh_2
  6777. vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
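// qh_x packs qh and qh>>1 side by side; the not/and steps below produce 16 wherever a high bit is clear,
// and subtracting it maps the quants to [-16, 15]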
  6778. vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
  6779. vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
  6780. vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
  6781. vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
  6782. vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
  6783. vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
  6784. vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
  6785. vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);
  6786. // load q5
  6787. vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
  6788. vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);
  6789. vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
  6790. vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
  6791. vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
  6792. vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));
  6793. vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
  6794. vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
  6795. vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
  6796. vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);
  6797. // load Q8 and multiply it with Q5
  6798. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  6799. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  6800. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  6801. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  6802. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  6803. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  6804. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  6805. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  6806. int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
  6807. int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
  6808. int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
  6809. int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);
  6810. sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
  6811. }
  6812. *s = sumf;
  6813. #elif defined(__POWER9_VECTOR__)
  6814. const vector signed char lowMask = vec_splats((signed char)0xF);
  6815. const vector unsigned char v1 = vec_splats((unsigned char)0x1);
  6816. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  6817. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  6818. vector float vsumf0 = vec_splats(0.0f);
  6819. vector float vsumf1 = vec_splats(0.0f);
  6820. vector float vsumf2 = vec_splats(0.0f);
  6821. vector float vsumf3 = vec_splats(0.0f);
  6822. #pragma GCC unroll 2
  6823. for (int i = 0; i < nb; ++i) {
  6824. __builtin_prefetch(x[i].qs, 0, 1);
  6825. __builtin_prefetch(y[i].qs, 0, 1);
  6826. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  6827. vector float vyd = vec_splats(y[i].d);
6828. vector float vd = vec_mul(vxd, vyd);
  6829. vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs);
  6830. vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].qs);
  6831. vector signed char qxs00 = (vector signed char)vec_and(qxs0, lowMask);
  6832. vector signed char qxs01 = (vector signed char)vec_sr(qxs0, v4);
  6833. vector signed char qxs10 = (vector signed char)vec_and(qxs1, lowMask);
  6834. vector signed char qxs11 = (vector signed char)vec_sr(qxs1, v4);
  6835. vector signed char qxhs = (vector signed char)vec_xl_len(x[i].qh, 8);
  6836. vector signed char qxhs0 = vec_or(qxhs, vec_sr(vec_sld(qxhs, qxhs, 8), v1));
  6837. vector signed char qxhs1 = vec_sr(qxhs0, v2);
  6838. vector signed char qxh00 = vec_sl(vec_andc((vector signed char)v1, qxhs0), v4);
  6839. vector signed char qxh10 = vec_sl(vec_andc((vector signed char)v1, qxhs1), v4);
  6840. vector signed char qxh01 = vec_sl(vec_andc((vector signed char)v1, vec_sr(qxhs0, v4)), v4);
  6841. vector signed char qxh11 = vec_sl(vec_andc((vector signed char)v1, vec_sr(qxhs1, v4)), v4);
  6842. vector signed char q5x00 = vec_sub(qxs00, qxh00);
  6843. vector signed char q5x10 = vec_sub(qxs10, qxh10);
  6844. vector signed char q5x01 = vec_sub(qxs01, qxh01);
  6845. vector signed char q5x11 = vec_sub(qxs11, qxh11);
  6846. vector signed char q8y00 = vec_xl( 0, y[i].qs);
  6847. vector signed char q8y10 = vec_xl(16, y[i].qs);
  6848. vector signed char q8y01 = vec_xl(32, y[i].qs);
  6849. vector signed char q8y11 = vec_xl(48, y[i].qs);
  6850. vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00));
  6851. vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01));
  6852. vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10));
  6853. vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11));
  6854. vector signed short vs = (vector signed short)vec_unpackh(vec_xl_len(x[i].scales, 4));
  6855. vector signed short vs0 = vec_splat(vs, 0);
  6856. vector signed short vs1 = vec_splat(vs, 1);
  6857. vector signed short vs2 = vec_splat(vs, 2);
  6858. vector signed short vs3 = vec_splat(vs, 3);
  6859. vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0));
  6860. vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1));
  6861. vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2));
  6862. vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3));
  6863. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  6864. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  6865. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  6866. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  6867. }
  6868. vsumf0 = vec_add(vsumf0, vsumf2);
  6869. vsumf1 = vec_add(vsumf1, vsumf3);
  6870. vsumf0 = vec_add(vsumf0, vsumf1);
  6871. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  6872. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  6873. *s = vec_extract(vsumf0, 0);
  6874. #else
  6875. int8_t aux8[QK_K];
  6876. int16_t aux16[16];
  6877. float sums [8];
  6878. memset(sums, 0, 8*sizeof(float));
  6879. float sumf = 0;
  6880. for (int i = 0; i < nb; ++i) {
  6881. const uint8_t * restrict q4 = x[i].qs;
  6882. const uint8_t * restrict hm = x[i].qh;
  6883. const int8_t * restrict q8 = y[i].qs;
  6884. int8_t * restrict a = aux8;
  6885. for (int l = 0; l < 32; ++l) {
  6886. a[l+ 0] = q4[l] & 0xF;
  6887. a[l+32] = q4[l] >> 4;
  6888. }
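// subtract 16 wherever a quant's high bit (from qh) is clear, giving signed values in [-16, 15]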
  6889. for (int is = 0; is < 8; ++is) {
  6890. uint8_t m = 1 << is;
  6891. for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
  6892. }
  6893. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6894. const int8_t * restrict sc = x[i].scales;
  6895. for (int j = 0; j < QK_K/16; ++j) {
  6896. const float dl = d * sc[j];
  6897. for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
  6898. for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
  6899. q8 += 16; a += 16;
  6900. }
  6901. }
  6902. for (int l = 0; l < 8; ++l) sumf += sums[l];
  6903. *s = sumf;
  6904. #endif
  6905. }
  6906. #endif
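//
// ggml_vec_dot_q6_K_q8_K: dot product of a q6_K row with a q8_K row.
// In q6_K each weight is stored as 6 bits: the low 4 bits live in ql and the top
// 2 bits in qh, and the reconstructed value is offset by 32, i.e. roughly
//     q = (int8_t)((ql & 0xF) | ((qh & 3) << 4)) - 32;   // q in [-32, 31]
// An int8 scale is stored per group of 16 weights, and the per-block fp16 d is
// combined with the q8_K scale y[i].d for the final float result.
//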
#if QK_K == 256
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
assert(n % QK_K == 0);
assert(nrc == 1);
UNUSED(nrc);
UNUSED(bx);
UNUSED(by);
UNUSED(bs);
const block_q6_K * restrict x = vx;
const block_q8_K * restrict y = vy;
const int nb = n / QK_K;
  6918. #ifdef __ARM_NEON
  6919. float sum = 0;
  6920. const uint8x16_t m4b = vdupq_n_u8(0xF);
  6921. const int32x4_t vzero = vdupq_n_s32(0);
// note: the -32 offset is not subtracted per element in this path; it is folded out
// via isum_mins below, which is computed from the q8_K block sums (bsums).
  6923. const uint8x16_t mone = vdupq_n_u8(3);
  6924. ggml_int8x16x4_t q6bytes;
  6925. ggml_uint8x16x4_t q6h;
  6926. for (int i = 0; i < nb; ++i) {
  6927. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  6928. const uint8_t * restrict q6 = x[i].ql;
  6929. const uint8_t * restrict qh = x[i].qh;
  6930. const int8_t * restrict q8 = y[i].qs;
  6931. const int8_t * restrict scale = x[i].scales;
  6932. const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
  6933. const int8x16_t scales = vld1q_s8(scale);
  6934. const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
  6935. const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
  6936. vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
  6937. vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
  6938. vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
  6939. int32_t isum_mins = vaddvq_s32(prod);
  6940. int32_t isum = 0;
  6941. for (int j = 0; j < QK_K/128; ++j) {
  6942. ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
  6943. ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
  6944. ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6945. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
  6946. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
  6947. uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
  6948. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6949. shifted = vshrq_n_u8(qhbits.val[1], 2);
  6950. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
// (no per-element -32 subtraction needed here; the offset is accounted for in isum_mins)
  6955. q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
  6956. q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
  6957. q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
  6958. q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
  6959. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6960. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6961. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6962. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6963. scale += 4;
  6964. q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
  6965. shifted = vshrq_n_u8(qhbits.val[0], 4);
  6966. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6967. shifted = vshrq_n_u8(qhbits.val[1], 4);
  6968. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6969. shifted = vshrq_n_u8(qhbits.val[0], 6);
  6970. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  6971. shifted = vshrq_n_u8(qhbits.val[1], 6);
  6972. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
// (again no -32 subtraction; see isum_mins)
  6977. q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
  6978. q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
  6979. q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
  6980. q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
  6981. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  6982. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  6983. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  6984. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  6985. scale += 4;
  6986. }
sum += d_all * y[i].d * (isum - 32 * isum_mins);   // apply the deferred -32 offset correction here
  6989. }
  6990. *s = sum;
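// AVX2 path: the q6 values are kept unsigned in [0, 63] so that _mm256_maddubs_epi16
// can be used directly; the constant 32 offset is removed afterwards by subtracting
// maddubs(32, q8) (the q8s_* terms), which equals 32 * q8 summed in pairs.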
  6991. #elif defined __AVX2__
  6992. const __m256i m4 = _mm256_set1_epi8(0xF);
  6993. const __m256i m2 = _mm256_set1_epi8(3);
  6994. const __m256i m32s = _mm256_set1_epi8(32);
  6995. __m256 acc = _mm256_setzero_ps();
  6996. for (int i = 0; i < nb; ++i) {
  6997. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  6998. const uint8_t * restrict q4 = x[i].ql;
  6999. const uint8_t * restrict qh = x[i].qh;
  7000. const int8_t * restrict q8 = y[i].qs;
  7001. const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  7002. __m256i sumi = _mm256_setzero_si256();
  7003. int is = 0;
  7004. for (int j = 0; j < QK_K/128; ++j) {
  7005. const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
  7006. const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
  7007. const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
  7008. const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
  7009. is += 4;
  7010. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  7011. const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
  7012. const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
  7013. const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
  7014. const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
  7015. const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
  7016. const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
  7017. const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
  7018. const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
  7019. const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
  7020. const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
  7021. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7022. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7023. const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7024. const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  7025. __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
  7026. __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
  7027. __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
  7028. __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
  7029. __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
  7030. __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
  7031. __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
  7032. __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
  7033. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  7034. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  7035. p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
  7036. p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
  7037. p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
  7038. p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
  7039. p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
  7040. p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
  7041. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  7042. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
  7043. }
  7044. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  7045. }
  7046. *s = hsum_float_8(acc);
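// AVX (pre-AVX2) path: same algorithm as above, but operating on 128-bit halves and
// stepping the scale shuffle mask manually instead of using get_scale_shuffle().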
  7047. #elif defined __AVX__
  7048. const __m128i m4 = _mm_set1_epi8(0xF);
  7049. const __m128i m3 = _mm_set1_epi8(3);
  7050. const __m128i m32s = _mm_set1_epi8(32);
  7051. const __m128i m2 = _mm_set1_epi8(2);
  7052. __m256 acc = _mm256_setzero_ps();
  7053. for (int i = 0; i < nb; ++i) {
  7054. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7055. const uint8_t * restrict q4 = x[i].ql;
  7056. const uint8_t * restrict qh = x[i].qh;
  7057. const int8_t * restrict q8 = y[i].qs;
  7058. const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
  7059. __m128i sumi_0 = _mm_setzero_si128();
  7060. __m128i sumi_1 = _mm_setzero_si128();
  7061. __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  7062. for (int j = 0; j < QK_K/128; ++j) {
  7063. const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
  7064. const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
  7065. const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
  7066. const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
  7067. const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
  7068. const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
  7069. const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
  7070. const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
  7071. const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
  7072. const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
  7073. const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7074. const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7075. const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7076. const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
  7077. const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
  7078. const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
  7079. const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
  7080. const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
  7081. const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
  7082. const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
  7083. const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
  7084. const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
  7085. const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7086. const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7087. const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7088. const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7089. const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7090. const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7091. const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7092. const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
  7093. __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
  7094. __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
  7095. __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
  7096. __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
  7097. __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
  7098. __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
  7099. __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
  7100. __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
  7101. __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
  7102. __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
  7103. __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
  7104. __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
  7105. __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
  7106. __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
  7107. __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
  7108. __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
  7109. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  7110. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  7111. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  7112. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  7113. p16_4 = _mm_sub_epi16(p16_4, q8s_4);
  7114. p16_5 = _mm_sub_epi16(p16_5, q8s_5);
  7115. p16_6 = _mm_sub_epi16(p16_6, q8s_6);
  7116. p16_7 = _mm_sub_epi16(p16_7, q8s_7);
  7117. const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
  7118. shuffle = _mm_add_epi8(shuffle, m2);
  7119. const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
  7120. shuffle = _mm_add_epi8(shuffle, m2);
  7121. const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
  7122. shuffle = _mm_add_epi8(shuffle, m2);
  7123. const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
  7124. shuffle = _mm_add_epi8(shuffle, m2);
  7125. p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
  7126. p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
  7127. p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
  7128. p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
  7129. p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
  7130. p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
  7131. p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
  7132. p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
  7133. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  7134. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  7135. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
  7136. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
  7137. }
  7138. __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
  7139. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
  7140. }
  7141. *s = hsum_float_8(acc);
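// RISC-V vector path: each iteration decodes 128 weights with vl = 32 byte lanes,
// widens the q6*q8 products to 16 bits, applies the eight group scales while widening
// to 32 bits (vl = 16 per half), and reduces everything into the running sum_t.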
  7142. #elif defined __riscv_v_intrinsic
  7143. float sumf = 0;
  7144. for (int i = 0; i < nb; ++i) {
  7145. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7146. const uint8_t * restrict q6 = x[i].ql;
  7147. const uint8_t * restrict qh = x[i].qh;
  7148. const int8_t * restrict q8 = y[i].qs;
  7149. const int8_t * restrict scale = x[i].scales;
  7150. size_t vl;
  7151. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  7152. int sum_t = 0;
  7153. int is = 0;
  7154. for (int j = 0; j < QK_K/128; ++j) {
  7155. vl = 32;
  7156. // load qh
  7157. vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);
  7158. // load Q6
  7159. vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
  7160. vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);
  7161. vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
  7162. vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
  7163. vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
  7164. vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);
  7165. vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
  7166. vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
  7167. vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
  7168. vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);
  7169. vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
  7170. vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
  7171. vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
  7172. vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);
  7173. vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
  7174. vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
  7175. vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
  7176. vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);
  7177. // load Q8 and take product
  7178. vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
  7179. vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
  7180. vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
  7181. vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
  7182. vl = 16;
  7183. vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
  7184. vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
  7185. vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
  7186. vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
  7187. vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
  7188. vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
  7189. vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
  7190. vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);
  7191. vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
  7192. vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
  7193. vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
  7194. vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);
  7195. sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
q6 += 64; qh += 32; q8 += 128; is += 8;
  7197. }
  7198. sumf += d * sum_t;
  7199. }
  7200. *s = sumf;
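// POWER9/VSX path: vec_mule/vec_mulo pairs produce the even/odd 16-bit products,
// which are scaled and accumulated in eight 32-bit accumulators that are merged and
// converted to float once per block.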
  7201. #elif defined(__POWER9_VECTOR__)
  7202. const vector signed char lowMask = vec_splats((signed char)0xF);
  7203. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  7204. const vector unsigned char v3 = vec_splats((unsigned char)0x3);
  7205. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  7206. const vector unsigned char v6 = vec_splats((unsigned char)0x6);
  7207. const vector signed char off = vec_splats((signed char)0x20);
  7208. vector float vsumf0 = vec_splats(0.0f);
  7209. vector float vsumf1 = vec_splats(0.0f);
  7210. vector float vsumf2 = vec_splats(0.0f);
  7211. vector float vsumf3 = vec_splats(0.0f);
  7212. for (int i = 0; i < nb; ++i) {
  7213. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  7214. vector float vyd = vec_splats(y[i].d);
  7215. vector float vd = vec_mul(vxd, vyd);
  7216. vector signed int vsumi0 = vec_splats((int32_t)0);
  7217. vector signed int vsumi1 = vec_splats((int32_t)0);
  7218. vector signed int vsumi2 = vec_splats((int32_t)0);
  7219. vector signed int vsumi3 = vec_splats((int32_t)0);
  7220. vector signed int vsumi4 = vec_splats((int32_t)0);
  7221. vector signed int vsumi5 = vec_splats((int32_t)0);
  7222. vector signed int vsumi6 = vec_splats((int32_t)0);
  7223. vector signed int vsumi7 = vec_splats((int32_t)0);
  7224. const uint8_t * restrict q6 = x[i].ql;
  7225. const uint8_t * restrict qh = x[i].qh;
  7226. const int8_t * restrict qs = x[i].scales;
  7227. const int8_t * restrict q8 = y[i].qs;
  7228. for (int j = 0; j < QK_K/128; ++j) {
  7229. __builtin_prefetch(q6, 0, 0);
  7230. __builtin_prefetch(qh, 0, 0);
  7231. __builtin_prefetch(q8, 0, 0);
  7232. vector signed char qxs0 = (vector signed char)vec_xl( 0, q6);
  7233. vector signed char qxs1 = (vector signed char)vec_xl(16, q6);
  7234. vector signed char qxs2 = (vector signed char)vec_xl(32, q6);
  7235. vector signed char qxs3 = (vector signed char)vec_xl(48, q6);
  7236. q6 += 64;
  7237. vector signed char qxs00 = vec_and(qxs0, lowMask);
  7238. vector signed char qxs01 = vec_sr(qxs0, v4);
  7239. vector signed char qxs10 = vec_and(qxs1, lowMask);
  7240. vector signed char qxs11 = vec_sr(qxs1, v4);
  7241. vector signed char qxs20 = vec_and(qxs2, lowMask);
  7242. vector signed char qxs21 = vec_sr(qxs2, v4);
  7243. vector signed char qxs30 = vec_and(qxs3, lowMask);
  7244. vector signed char qxs31 = vec_sr(qxs3, v4);
  7245. vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh);
  7246. vector signed char qxhs1 = (vector signed char)vec_xl(16, qh);
  7247. qh += 32;
  7248. vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4);
  7249. vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4);
  7250. vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4);
  7251. vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4);
  7252. vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4);
  7253. vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4);
  7254. vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4);
  7255. vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4);
  7256. vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off);
  7257. vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off);
  7258. vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off);
  7259. vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off);
  7260. vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off);
  7261. vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off);
  7262. vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off);
  7263. vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off);
  7264. vector signed char q8y00 = vec_xl( 0, q8);
  7265. vector signed char q8y10 = vec_xl( 16, q8);
  7266. vector signed char q8y20 = vec_xl( 32, q8);
  7267. vector signed char q8y30 = vec_xl( 48, q8);
  7268. vector signed char q8y01 = vec_xl( 64, q8);
  7269. vector signed char q8y11 = vec_xl( 80, q8);
  7270. vector signed char q8y21 = vec_xl( 96, q8);
  7271. vector signed char q8y31 = vec_xl(112, q8);
  7272. q8 += 128;
  7273. vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00));
  7274. vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10));
  7275. vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20));
  7276. vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30));
  7277. vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01));
  7278. vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11));
  7279. vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21));
  7280. vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31));
  7281. vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8));
  7282. qs += 8;
  7283. vector signed short vs0 = vec_splat(vscales, 0);
  7284. vector signed short vs1 = vec_splat(vscales, 1);
  7285. vector signed short vs2 = vec_splat(vscales, 2);
  7286. vector signed short vs3 = vec_splat(vscales, 3);
  7287. vector signed short vs4 = vec_splat(vscales, 4);
  7288. vector signed short vs5 = vec_splat(vscales, 5);
  7289. vector signed short vs6 = vec_splat(vscales, 6);
  7290. vector signed short vs7 = vec_splat(vscales, 7);
  7291. vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0);
  7292. vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1);
  7293. vsumi2 = vec_add(vec_mule(qv01, vs4), vsumi2);
  7294. vsumi3 = vec_add(vec_mulo(qv01, vs4), vsumi3);
  7295. vsumi4 = vec_add(vec_mule(qv10, vs1), vsumi4);
  7296. vsumi5 = vec_add(vec_mulo(qv10, vs1), vsumi5);
  7297. vsumi6 = vec_add(vec_mule(qv11, vs5), vsumi6);
  7298. vsumi7 = vec_add(vec_mulo(qv11, vs5), vsumi7);
  7299. vsumi0 = vec_add(vec_mule(qv20, vs2), vsumi0);
  7300. vsumi1 = vec_add(vec_mulo(qv20, vs2), vsumi1);
  7301. vsumi2 = vec_add(vec_mule(qv21, vs6), vsumi2);
  7302. vsumi3 = vec_add(vec_mulo(qv21, vs6), vsumi3);
  7303. vsumi4 = vec_add(vec_mule(qv30, vs3), vsumi4);
  7304. vsumi5 = vec_add(vec_mulo(qv30, vs3), vsumi5);
  7305. vsumi6 = vec_add(vec_mule(qv31, vs7), vsumi6);
  7306. vsumi7 = vec_add(vec_mulo(qv31, vs7), vsumi7);
  7307. }
  7308. vsumi0 = vec_add(vsumi0, vsumi4);
  7309. vsumi1 = vec_add(vsumi1, vsumi5);
  7310. vsumi2 = vec_add(vsumi2, vsumi6);
  7311. vsumi3 = vec_add(vsumi3, vsumi7);
  7312. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  7313. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  7314. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  7315. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  7316. }
  7317. vsumf0 = vec_add(vsumf0, vsumf2);
  7318. vsumf1 = vec_add(vsumf1, vsumf3);
  7319. vsumf0 = vec_add(vsumf0, vsumf1);
  7320. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  7321. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  7322. *s = vec_extract(vsumf0, 0);
  7323. #else
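// Portable reference path: decode the whole 256-weight super-block into aux8, then
// accumulate 8-wide partial products per 16-weight group (scaled by the int8 group
// scale) into aux32, and fold them into the float sums with d at the end of the block.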
  7324. int8_t aux8[QK_K];
  7325. int16_t aux16[8];
  7326. float sums [8];
  7327. int32_t aux32[8];
  7328. memset(sums, 0, 8*sizeof(float));
  7329. float sumf = 0;
  7330. for (int i = 0; i < nb; ++i) {
  7331. const uint8_t * restrict q4 = x[i].ql;
  7332. const uint8_t * restrict qh = x[i].qh;
  7333. const int8_t * restrict q8 = y[i].qs;
  7334. memset(aux32, 0, 8*sizeof(int32_t));
  7335. int8_t * restrict a = aux8;
  7336. for (int j = 0; j < QK_K; j += 128) {
  7337. for (int l = 0; l < 32; ++l) {
  7338. a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  7339. a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  7340. a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  7341. a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  7342. }
  7343. a += 128;
  7344. q4 += 64;
  7345. qh += 32;
  7346. }
  7347. a = aux8;
  7348. int is = 0;
  7349. for (int j = 0; j < QK_K/16; ++j) {
  7350. int scale = x[i].scales[is++];
  7351. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7352. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7353. q8 += 8; a += 8;
  7354. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7355. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7356. q8 += 8; a += 8;
  7357. }
  7358. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7359. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  7360. }
  7361. for (int l = 0; l < 8; ++l) sumf += sums[l];
  7362. *s = sumf;
  7363. #endif
  7364. }
  7365. #else
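// QK_K != 256 build (the 64-weight super-block): the same q6_K x q8_K dot product, but
// each block holds only 64 values, so the kernels below have no inner 128-weight loop.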
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
assert(n % QK_K == 0);
assert(nrc == 1);
UNUSED(nrc);
UNUSED(bx);
UNUSED(by);
UNUSED(bs);
const block_q6_K * restrict x = vx;
const block_q8_K * restrict y = vy;
const int nb = n / QK_K;
  7376. #ifdef __ARM_NEON
  7377. float sum = 0;
  7378. const uint8x16_t m4b = vdupq_n_u8(0xF);
  7379. const int8x16_t m32s = vdupq_n_s8(32);
  7380. const int32x4_t vzero = vdupq_n_s32(0);
  7381. const uint8x16_t mone = vdupq_n_u8(3);
  7382. ggml_int8x16x4_t q6bytes;
  7383. ggml_uint8x16x4_t q6h;
  7384. for (int i = 0; i < nb; ++i) {
  7385. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  7386. const uint8_t * restrict q6 = x[i].ql;
  7387. const uint8_t * restrict qh = x[i].qh;
  7388. const int8_t * restrict q8 = y[i].qs;
  7389. const int8_t * restrict scale = x[i].scales;
  7390. int32_t isum = 0;
  7391. uint8x16_t qhbits = vld1q_u8(qh);
  7392. ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
  7393. ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
  7394. q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
  7395. uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
  7396. q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  7397. shifted = vshrq_n_u8(qhbits, 4);
  7398. q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  7399. shifted = vshrq_n_u8(qhbits, 6);
  7400. q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
  7401. q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
  7402. q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
  7403. q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
  7404. q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
  7405. isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
  7406. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
  7407. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
  7408. vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
  7409. sum += isum * d_all * y[i].d;
  7410. }
  7411. *s = sum;
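// AVX2 path for the 64-weight block: the four int8 group scales are broadcast via
// _mm_set1_pi8 and paired with _mm_set_epi64, so one _mm256_madd_epi16 per 32 weights
// applies the scales after the usual maddubs / minus-32 correction.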
  7412. #elif defined __AVX2__
  7413. const __m256i m4 = _mm256_set1_epi8(0xF);
  7414. const __m256i m2 = _mm256_set1_epi8(3);
  7415. const __m256i m32s = _mm256_set1_epi8(32);
  7416. __m256 acc = _mm256_setzero_ps();
  7417. for (int i = 0; i < nb; ++i) {
  7418. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7419. const uint8_t * restrict q4 = x[i].ql;
  7420. const uint8_t * restrict qh = x[i].qh;
  7421. const int8_t * restrict q8 = y[i].qs;
  7422. const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
  7423. const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
  7424. const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
  7425. const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
  7426. __m256i sumi = _mm256_setzero_si256();
  7427. const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
  7428. const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
  7429. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
  7430. const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
  7431. const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
  7432. const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
  7433. const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
  7434. const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
  7435. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  7436. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  7437. __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
  7438. __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
  7439. __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
  7440. __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
  7441. p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
  7442. p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
  7443. p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
  7444. p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
  7445. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
  7446. acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
  7447. }
  7448. *s = hsum_float_8(acc);
  7449. #elif defined __AVX__
  7450. const __m128i m4 = _mm_set1_epi8(0xF);
  7451. const __m128i m2 = _mm_set1_epi8(3);
  7452. const __m128i m32s = _mm_set1_epi8(32);
  7453. __m256 acc = _mm256_setzero_ps();
  7454. for (int i = 0; i < nb; ++i) {
  7455. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  7456. const uint8_t * restrict q4 = x[i].ql;
  7457. const uint8_t * restrict qh = x[i].qh;
  7458. const int8_t * restrict q8 = y[i].qs;
  7459. const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
  7460. const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
  7461. const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
  7462. const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
  7463. __m128i sumi_0 = _mm_setzero_si128();
  7464. __m128i sumi_1 = _mm_setzero_si128();
  7465. const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
  7466. const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
  7467. const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
  7468. const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
  7469. const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
  7470. const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
  7471. const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
  7472. const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
  7473. const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
  7474. const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
  7475. const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
  7476. const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
  7477. const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
  7478. const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
  7479. __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
  7480. __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
  7481. __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
  7482. __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
  7483. __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
  7484. __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
  7485. __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
  7486. __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
  7487. p16_0 = _mm_sub_epi16(p16_0, q8s_0);
  7488. p16_1 = _mm_sub_epi16(p16_1, q8s_1);
  7489. p16_2 = _mm_sub_epi16(p16_2, q8s_2);
  7490. p16_3 = _mm_sub_epi16(p16_3, q8s_3);
  7491. p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
  7492. p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
  7493. p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
  7494. p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
  7495. sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
  7496. sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
  7497. acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
  7498. }
  7499. *s = hsum_float_8(acc);
  7500. #elif defined __riscv_v_intrinsic
  7501. float sumf = 0;
  7502. for (int i = 0; i < nb; ++i) {
  7503. const float d_all = GGML_FP16_TO_FP32(x[i].d);
  7504. const uint8_t * restrict q6 = x[i].ql;
  7505. const uint8_t * restrict qh = x[i].qh;
  7506. const int8_t * restrict q8 = y[i].qs;
  7507. const int8_t * restrict scale = x[i].scales;
  7508. int32_t isum = 0;
  7509. size_t vl = 16;
  7510. vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
  7511. // load Q6
  7512. vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
  7513. vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);
  7514. // load qh
  7515. vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);
  7516. vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7517. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  7518. vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7519. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  7520. vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7521. qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
  7522. vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
  7523. vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
  7524. vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
  7525. vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
  7526. vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);
  7527. vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
  7528. vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
  7529. vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
  7530. vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);
  7531. // load Q8 and take product
  7532. vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
  7533. vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
  7534. vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
  7535. vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
  7536. vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
  7537. vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
  7538. vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
  7539. vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
  7540. isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
  7541. isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
  7542. isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
  7543. isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];
  7544. sumf += isum * d_all * y[i].d;
  7545. }
  7546. *s = sumf;
  7547. #elif defined(__POWER9_VECTOR__)
  7548. const vector signed char lowMask = vec_splats((signed char)0xF);
  7549. const vector unsigned char v2 = vec_splats((unsigned char)0x2);
  7550. const vector unsigned char v3 = vec_splats((unsigned char)0x3);
  7551. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  7552. const vector unsigned char v6 = vec_splats((unsigned char)0x6);
  7553. const vector signed char off = vec_splats((signed char)0x20);
  7554. vector float vsumf0 = vec_splats(0.0f);
  7555. vector float vsumf1 = vec_splats(0.0f);
  7556. vector float vsumf2 = vec_splats(0.0f);
  7557. vector float vsumf3 = vec_splats(0.0f);
  7558. #pragma GCC unroll 2
  7559. for (int i = 0; i < nb; ++i) {
  7560. __builtin_prefetch(x[i].ql, 0, 1);
  7561. __builtin_prefetch(x[i].qh, 0, 1);
  7562. __builtin_prefetch(y[i].qs, 0, 1);
  7563. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  7564. vector float vyd = vec_splats(y[i].d);
vector float vd = vec_mul(vxd, vyd);
  7566. vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].ql);
  7567. vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].ql);
  7568. vector signed char qxs00 = vec_and(qxs0, lowMask);
  7569. vector signed char qxs01 = vec_sr(qxs0, v4);
  7570. vector signed char qxs10 = vec_and(qxs1, lowMask);
  7571. vector signed char qxs11 = vec_sr(qxs1, v4);
  7572. vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh);
  7573. vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4);
  7574. vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4);
  7575. vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4);
  7576. vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4);
  7577. vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off);
  7578. vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off);
  7579. vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off);
  7580. vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off);
  7581. vector signed char q8y00 = vec_xl( 0, y[i].qs);
  7582. vector signed char q8y10 = vec_xl(16, y[i].qs);
  7583. vector signed char q8y01 = vec_xl(32, y[i].qs);
  7584. vector signed char q8y11 = vec_xl(48, y[i].qs);
  7585. vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00));
  7586. vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10));
  7587. vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01));
  7588. vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11));
  7589. vector signed short vs = (vector signed short)vec_unpackh(vec_xl_len(x[i].scales, 4));
  7590. vector signed short vs0 = vec_splat(vs, 0);
  7591. vector signed short vs1 = vec_splat(vs, 1);
  7592. vector signed short vs2 = vec_splat(vs, 2);
  7593. vector signed short vs3 = vec_splat(vs, 3);
  7594. vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0));
  7595. vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1));
  7596. vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2));
  7597. vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3));
  7598. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  7599. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  7600. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  7601. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  7602. }
  7603. vsumf0 = vec_add(vsumf0, vsumf2);
  7604. vsumf1 = vec_add(vsumf1, vsumf3);
  7605. vsumf0 = vec_add(vsumf0, vsumf1);
  7606. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  7607. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  7608. *s = vec_extract(vsumf0, 0);
  7609. #else
  7610. int8_t aux8[QK_K];
  7611. int16_t aux16[8];
  7612. float sums [8];
  7613. int32_t aux32[8];
  7614. memset(sums, 0, 8*sizeof(float));
  7615. float sumf = 0;
  7616. for (int i = 0; i < nb; ++i) {
  7617. const uint8_t * restrict q4 = x[i].ql;
  7618. const uint8_t * restrict qh = x[i].qh;
  7619. const int8_t * restrict q8 = y[i].qs;
  7620. memset(aux32, 0, 8*sizeof(int32_t));
  7621. int8_t * restrict a = aux8;
  7622. for (int l = 0; l < 16; ++l) {
  7623. a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  7624. a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  7625. a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  7626. a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  7627. }
  7628. int is = 0;
  7629. for (int j = 0; j < QK_K/16; ++j) {
  7630. int scale = x[i].scales[is++];
  7631. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7632. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7633. q8 += 8; a += 8;
  7634. for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
  7635. for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
  7636. q8 += 8; a += 8;
  7637. }
  7638. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7639. for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
  7640. }
  7641. for (int l = 0; l < 8; ++l) sumf += sums[l];
  7642. *s = sumf;
  7643. #endif
  7644. }
  7645. #endif
  7646. #if defined (__AVX2__) || defined (__ARM_NEON) || defined (__POWER9_VECTOR__)
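// keven_signs_q2xs: lookup table used by the iq2 kernels. Each of the 128 rows holds
// eight +/-1 sign bytes; the first seven signs come from the 7-bit index and the
// eighth is chosen so the number of -1 entries is even (a parity bit), which is how
// the iq2 formats encode 8 signs in 7 bits.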
  7647. static const int8_t keven_signs_q2xs[1024] = {
  7648. 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
  7649. 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
  7650. 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
  7651. 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
  7652. 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
  7653. 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
  7654. 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
  7655. 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
  7656. 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
  7657. 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
  7658. 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
  7659. 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
  7660. 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
  7661. 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
  7662. 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
  7663. 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
  7664. 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
  7665. 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
  7666. 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
  7667. 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
  7668. 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
  7669. 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
  7670. 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
  7671. 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
  7672. 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
  7673. 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
  7674. 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
  7675. 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
  7676. 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
  7677. 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
  7678. 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
  7679. 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
  7680. };
  7681. #endif
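// ggml_vec_dot_iq2_xxs_q8_K: for every 32 weights the block stores two 32-bit words;
// the first holds four 8-bit indices into iq2xxs_grid (8 weights each) and the second
// packs four 7-bit sign indices plus a 4-bit scale in its top bits. The scale is
// applied as (2*ls + 1) and the final result is multiplied by 0.125f (= 1/8); the
// NEON path folds the same factors as 0.25f * (0.5f + ls).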
void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
assert(n % QK_K == 0);
assert(nrc == 1);
UNUSED(nrc);
UNUSED(bx);
UNUSED(by);
UNUSED(bs);
const block_iq2_xxs * restrict x = vx;
const block_q8_K * restrict y = vy;
const int nb = n / QK_K;
  7692. #if defined(__ARM_NEON)
  7693. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7694. uint32_t aux32[4];
  7695. const uint8_t * aux8 = (const uint8_t *)aux32;
  7696. ggml_int8x16x4_t q2u;
  7697. ggml_int8x16x4_t q2s;
  7698. ggml_int8x16x4_t q8b;
  7699. float sumf = 0;
  7700. for (int i = 0; i < nb; ++i) {
  7701. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7702. const uint16_t * restrict q2 = x[i].qs;
  7703. const int8_t * restrict q8 = y[i].qs;
  7704. float sumf1 = 0, sumf2 = 0;
  7705. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7706. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7707. memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
  7708. q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
  7709. q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
  7710. q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
  7711. q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
  7712. q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  7713. q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  7714. q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
  7715. q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
  7716. q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
  7717. q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
  7718. q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
  7719. q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
  7720. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
  7721. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
  7722. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
  7723. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
  7724. }
  7725. sumf += d*(sumf1 + sumf2);
  7726. }
  7727. *s = 0.25f * sumf;
  7728. #elif defined(__AVX2__)
  7729. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7730. uint32_t aux32[4];
  7731. const uint8_t * aux8 = (const uint8_t *)aux32;
  7732. __m256 accumf = _mm256_setzero_ps();
  7733. for (int i = 0; i < nb; ++i) {
  7734. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7735. const uint16_t * restrict q2 = x[i].qs;
  7736. const int8_t * restrict q8 = y[i].qs;
  7737. __m256i sumi1 = _mm256_setzero_si256();
  7738. __m256i sumi2 = _mm256_setzero_si256();
  7739. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  7740. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7741. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  7742. memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
  7743. const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
  7744. const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
  7745. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  7746. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  7747. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
  7748. signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
  7749. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  7750. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  7751. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7752. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7753. const uint16_t ls1 = aux32[1] >> 28;
  7754. const uint16_t ls2 = aux32[3] >> 28;
  7755. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  7756. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  7757. sumi1 = _mm256_add_epi32(sumi1, p1);
  7758. sumi2 = _mm256_add_epi32(sumi2, p2);
  7759. }
  7760. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  7761. }
  7762. *s = 0.125f * hsum_float_8(accumf);
  7763. #elif defined(__POWER9_VECTOR__)
  7764. vector float vsumf0 = vec_splats(0.0f);
  7765. vector float vsumf1 = vec_splats(0.0f);
  7766. vector float vsumf2 = vec_splats(0.0f);
  7767. vector float vsumf3 = vec_splats(0.0f);
  7768. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7769. for (int i = 0; i < nb; ++i) {
  7770. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  7771. vector float vyd = vec_splats(y[i].d);
  7772. vector float vd = vec_mul(vxd, vyd);
  7773. vector signed int vsumi0 = vec_splats((int32_t)0);
  7774. vector signed int vsumi1 = vec_splats((int32_t)0);
  7775. vector signed int vsumi2 = vec_splats((int32_t)0);
  7776. vector signed int vsumi3 = vec_splats((int32_t)0);
  7777. vector signed int vsumi4 = vec_splats((int32_t)0);
  7778. vector signed int vsumi5 = vec_splats((int32_t)0);
  7779. vector signed int vsumi6 = vec_splats((int32_t)0);
  7780. vector signed int vsumi7 = vec_splats((int32_t)0);
  7781. const uint16_t * restrict q2 = x[i].qs;
  7782. const int8_t * restrict q8 = y[i].qs;
  7783. for (int j = 0; j < QK_K/32; j += 2) {
  7784. __builtin_prefetch(q2, 0, 1);
  7785. __builtin_prefetch(q8, 0, 1);
  7786. uint32_t aux32[4];
  7787. const uint8_t * aux8 = (const uint8_t *)aux32;
  7788. memcpy(aux32, q2, 4*sizeof(uint32_t));
  7789. q2 += 8;
  7790. vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])};
  7791. vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])};
  7792. vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])};
  7793. vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])};
  7794. vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))};
  7795. vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))};
  7796. vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))};
  7797. vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))};
  7798. vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0);
  7799. vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1);
  7800. vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2);
  7801. vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3);
  7802. vector signed char q8y0 = vec_xl( 0, q8);
  7803. vector signed char q8y1 = vec_xl(16, q8);
  7804. vector signed char q8y2 = vec_xl(32, q8);
  7805. vector signed char q8y3 = vec_xl(48, q8);
  7806. q8 += 64;
  7807. vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
  7808. vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
  7809. vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
  7810. vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
  7811. const uint16_t ls0 = aux32[1] >> 28;
  7812. const uint16_t ls1 = aux32[3] >> 28;
  7813. vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1));
  7814. vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1));
  7815. vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
  7816. vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
  7817. vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
  7818. vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
  7819. vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
  7820. vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
  7821. vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
  7822. vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
  7823. }
  7824. vsumi0 = vec_add(vsumi0, vsumi4);
  7825. vsumi1 = vec_add(vsumi1, vsumi5);
  7826. vsumi2 = vec_add(vsumi2, vsumi6);
  7827. vsumi3 = vec_add(vsumi3, vsumi7);
  7828. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  7829. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  7830. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  7831. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  7832. }
  7833. vsumf0 = vec_add(vsumf0, vsumf2);
  7834. vsumf1 = vec_add(vsumf1, vsumf3);
  7835. vsumf0 = vec_add(vsumf0, vsumf1);
  7836. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  7837. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  7838. *s = 0.125f * vec_extract(vsumf0, 0);
  7839. #else
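// Scalar reference path. Per 32-weight sub-block (two uint32 read from x[i].qs):
//   aux32[0] : four 8-bit indices into iq2xxs_grid, 8 weights per entry
//   aux32[1] : bits 0..27 hold four 7-bit indices into ksigns_iq2xs, bits 28..31 a 4-bit scale
// The effective block scale is d * (2*scale4 + 1), with a constant 1/8 applied at the very end.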
  7840. uint32_t aux32[2];
  7841. const uint8_t * aux8 = (const uint8_t *)aux32;
  7842. float sumf = 0.f;
  7843. for (int i = 0; i < nb; ++i) {
  7844. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7845. const uint16_t * restrict q2 = x[i].qs;
  7846. const int8_t * restrict q8 = y[i].qs;
  7847. int32_t bsum = 0;
  7848. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  7849. memcpy(aux32, q2, 2*sizeof(uint32_t));
  7850. q2 += 4;
  7851. const uint32_t ls = 2*(aux32[1] >> 28) + 1;
  7852. int32_t sumi = 0;
  7853. for (int l = 0; l < 4; ++l) {
  7854. const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
  7855. const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
  7856. for (int j = 0; j < 8; ++j) {
  7857. sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
  7858. }
  7859. q8 += 8;
  7860. }
  7861. bsum += sumi * ls;
  7862. }
  7863. sumf += d * bsum;
  7864. }
  7865. *s = 0.125f * sumf;
  7866. #endif
  7867. }
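// Dot product of one iq2_xs row with one q8_K row.
// Each uint16 in x[i].qs covers 8 weights: the low 9 bits index the 8-byte rows of
// iq2xs_grid and the high 7 bits select a sign pattern (ksigns_iq2xs in the scalar path;
// keven_signs_q2xs appears to hold the same patterns pre-expanded to eight ±1 bytes for
// the vector paths). x[i].scales packs two 4-bit scales per byte, one per 16 weights,
// used as (2*scale4 + 1); a constant 1/8 is folded in when the result is written.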
  7868. void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  7869. assert(n % QK_K == 0);
  7870. assert(nrc == 1);
  7871. UNUSED(nrc);
  7872. UNUSED(bx);
  7873. UNUSED(by);
  7874. UNUSED(bs);
  7875. const block_iq2_xs * restrict x = vx;
  7876. const block_q8_K * restrict y = vy;
  7877. const int nb = n / QK_K;
  7878. #if defined(__ARM_NEON)
  7879. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  7880. ggml_int8x16x4_t q2u;
  7881. ggml_int8x16x4_t q2s;
  7882. ggml_int8x16x4_t q8b;
  7883. int32x4x4_t scales32;
  7884. float sumf = 0;
  7885. for (int i = 0; i < nb; ++i) {
  7886. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7887. const uint16_t * restrict q2 = x[i].qs;
  7888. const int8_t * restrict q8 = y[i].qs;
  7889. const uint8x8_t scales8 = vld1_u8(x[i].scales);
  7890. const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
  7891. const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
  7892. uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
  7893. scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
  7894. const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
  7895. const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
  7896. scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
  7897. scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
  7898. scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
  7899. scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
  7900. int32x4_t sumi = vdupq_n_s32(0);
  7901. for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
  7902. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  7903. q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
  7904. q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
  7905. q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
  7906. q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
  7907. q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
  7908. q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
  7909. q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
  7910. q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
  7911. q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
  7912. q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
  7913. q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
  7914. q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
  7915. const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
  7916. const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
  7917. const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
  7918. const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
  7919. const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
  7920. sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
  7921. q2 += 8;
  7922. }
  7923. sumf += d*vaddvq_s32(sumi);
  7924. }
  7925. *s = 0.125f * sumf;
  7926. #elif defined(__AVX2__)
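// AVX2 path: only 7 sign bits are stored per group of 8 weights; the 8th is implied by
// even sign parity. The k_bit_helper tables below are 4-bit popcount-parity lookups
// (0x80 for odd indices) used to reconstruct that missing bit; block_sign_shuffle_* and
// bit_selector_mask then broadcast one sign bit per byte so _mm256_sign_epi8 can flip q8.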
  7927. const __m256i mone = _mm256_set1_epi8(1);
  7928. static const char block_sign_shuffle_mask_1[32] = {
  7929. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
  7930. 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
  7931. };
  7932. static const char block_sign_shuffle_mask_2[32] = {
  7933. 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
  7934. 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
  7935. };
  7936. static const uint8_t bit_selector_mask_bytes[32] = {
  7937. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7938. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  7939. };
  7940. const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
  7941. const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
  7942. const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);
  7943. #if QK_K == 64
  7944. static const uint8_t k_bit_helper[16] = {
  7945. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  7946. };
  7947. const __m128i bit_helper = _mm_loadu_si128((const __m128i*)k_bit_helper);
  7948. const __m128i m511 = _mm_set1_epi16(511);
  7949. typedef union {
  7950. __m128i vec_index;
  7951. uint16_t index[8];
  7952. } index_t;
  7953. index_t idx;
  7954. __m256 accumf = _mm256_setzero_ps();
  7955. for (int i = 0; i < nb; ++i) {
  7956. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  7957. const __m128i q2_data = _mm_loadu_si128((const __m128i*)x[i].qs);
  7958. idx.vec_index = _mm_and_si128(q2_data, m511);
  7959. const __m128i partial_sign_bits = _mm_srli_epi16(q2_data, 9);
  7960. const __m128i partial_sign_bits_upper = _mm_srli_epi16(q2_data, 13);
  7961. const __m128i partial_sign_bits_for_counting = _mm_xor_si128(partial_sign_bits, partial_sign_bits_upper);
  7962. const __m128i odd_bits = _mm_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
  7963. const __m128i full_sign_bits = _mm_or_si128(partial_sign_bits, odd_bits);
  7964. const __m256i full_signs = MM256_SET_M128I(full_sign_bits, full_sign_bits);
  7965. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)y[i].qs);
  7966. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)(y[i].qs+32));
  7967. const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[idx.index[3]], iq2xs_grid[idx.index[2]],
  7968. iq2xs_grid[idx.index[1]], iq2xs_grid[idx.index[0]]);
  7969. const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[idx.index[7]], iq2xs_grid[idx.index[6]],
  7970. iq2xs_grid[idx.index[5]], iq2xs_grid[idx.index[4]]);
  7971. __m256i signs;
  7972. signs = _mm256_shuffle_epi8(full_signs, block_sign_shuffle_1);
  7973. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7974. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
  7975. signs = _mm256_shuffle_epi8(full_signs, block_sign_shuffle_2);
  7976. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  7977. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
  7978. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  7979. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  7980. const __m256i sc1 = MM256_SET_M128I(_mm_set1_epi16(2*(x[i].scales[0] >> 4)+1), _mm_set1_epi16(2*(x[i].scales[0] & 0xf)+1));
  7981. const __m256i sc2 = MM256_SET_M128I(_mm_set1_epi16(2*(x[i].scales[1] >> 4)+1), _mm_set1_epi16(2*(x[i].scales[1] & 0xf)+1));
  7982. const __m256i sum = _mm256_add_epi32(_mm256_madd_epi16(sc1, dot1), _mm256_madd_epi16(sc2, dot2));
  7983. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sum), accumf);
  7984. }
  7985. *s = 0.125f * hsum_float_8(accumf);
  7986. #else
  7987. static const uint8_t k_bit_helper[32] = {
  7988. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  7989. 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
  7990. };
  7991. const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
  7992. const __m256i m511 = _mm256_set1_epi16(511);
  7993. const __m128i m4 = _mm_set1_epi8(0xf);
  7994. const __m128i m1 = _mm_set1_epi8(1);
  7995. uint64_t aux64;
  7996. // somewhat hacky, but gives a significant boost in performance
  7997. __m256i aux_gindex;
  7998. const uint16_t * gindex = (const uint16_t *)&aux_gindex;
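// aux_gindex keeps the 16 grid indices of four consecutive 32-weight sub-blocks in a
// single __m256i; reading them back through the uint16_t alias avoids per-lane extracts.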
  7999. __m256 accumf = _mm256_setzero_ps();
  8000. for (int i = 0; i < nb; ++i) {
  8001. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8002. const uint16_t * restrict q2 = x[i].qs;
  8003. const int8_t * restrict q8 = y[i].qs;
  8004. memcpy(&aux64, x[i].scales, 8);
  8005. __m128i stmp = _mm_set1_epi64x(aux64);
  8006. stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
  8007. const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);
  8008. __m256i sumi1 = _mm256_setzero_si256();
  8009. __m256i sumi2 = _mm256_setzero_si256();
  8010. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {
  8011. const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16;
  8012. aux_gindex = _mm256_and_si256(q2_data, m511);
  8013. const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
  8014. const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
  8015. const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);
  8016. const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
  8017. const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);
  8018. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8019. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8020. const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8021. const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8022. const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
  8023. iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
  8024. const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
  8025. iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
  8026. const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
  8027. iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
  8028. const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
  8029. iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);
  8030. const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
  8031. const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
  8032. const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l);
  8033. const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h);
  8034. __m256i signs;
  8035. signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
  8036. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  8037. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
  8038. signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
  8039. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  8040. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
  8041. signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
  8042. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  8043. const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));
  8044. signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
  8045. signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
  8046. const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));
  8047. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  8048. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  8049. const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
  8050. const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);
  8051. const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
  8052. const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
  8053. const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
  8054. const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));
  8055. sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
  8056. sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
  8057. sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
  8058. sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
  8059. }
  8060. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  8061. }
  8062. *s = 0.125f * hsum_float_8(accumf);
  8063. #endif
  8064. #elif defined(__POWER9_VECTOR__)
  8065. vector float vsumf0 = vec_splats(0.0f);
  8066. vector float vsumf1 = vec_splats(0.0f);
  8067. vector float vsumf2 = vec_splats(0.0f);
  8068. vector float vsumf3 = vec_splats(0.0f);
  8069. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  8070. for (int i = 0; i < nb; ++i) {
  8071. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  8072. vector float vyd = vec_splats(y[i].d);
  8073. vector float vd = vec_mul(vxd, vyd);
  8074. vector signed int vsumi0 = vec_splats((int32_t)0);
  8075. vector signed int vsumi1 = vec_splats((int32_t)0);
  8076. vector signed int vsumi2 = vec_splats((int32_t)0);
  8077. vector signed int vsumi3 = vec_splats((int32_t)0);
  8078. vector signed int vsumi4 = vec_splats((int32_t)0);
  8079. vector signed int vsumi5 = vec_splats((int32_t)0);
  8080. vector signed int vsumi6 = vec_splats((int32_t)0);
  8081. vector signed int vsumi7 = vec_splats((int32_t)0);
  8082. const uint16_t * restrict q2 = x[i].qs;
  8083. const uint8_t * restrict sc = x[i].scales;
  8084. const int8_t * restrict q8 = y[i].qs;
  8085. for (int j = 0; j < QK_K/64; ++j) {
  8086. __builtin_prefetch(q2, 0, 1);
  8087. __builtin_prefetch(q8, 0, 1);
  8088. vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))};
  8089. vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))};
  8090. vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))};
  8091. vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))};
  8092. vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))};
  8093. vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))};
  8094. vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))};
  8095. vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))};
  8096. q2 += 8;
  8097. vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0);
  8098. vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1);
  8099. vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2);
  8100. vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3);
  8101. vector signed char q8y0 = vec_xl( 0, q8);
  8102. vector signed char q8y1 = vec_xl(16, q8);
  8103. vector signed char q8y2 = vec_xl(32, q8);
  8104. vector signed char q8y3 = vec_xl(48, q8);
  8105. q8 += 64;
  8106. vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
  8107. vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
  8108. vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
  8109. vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
  8110. const uint16_t ls0 = (uint16_t)(sc[0] & 0xf);
  8111. const uint16_t ls1 = (uint16_t)(sc[0] >> 4);
  8112. const uint16_t ls2 = (uint16_t)(sc[1] & 0xf);
  8113. const uint16_t ls3 = (uint16_t)(sc[1] >> 4);
  8114. sc += 2;
  8115. vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1));
  8116. vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1));
  8117. vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1));
  8118. vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1));
  8119. vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0);
  8120. vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1);
  8121. vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2);
  8122. vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3);
  8123. vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4);
  8124. vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5);
  8125. vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6);
  8126. vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7);
  8127. }
  8128. vsumi0 = vec_add(vsumi0, vsumi4);
  8129. vsumi1 = vec_add(vsumi1, vsumi5);
  8130. vsumi2 = vec_add(vsumi2, vsumi6);
  8131. vsumi3 = vec_add(vsumi3, vsumi7);
  8132. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  8133. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  8134. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  8135. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  8136. }
  8137. vsumf0 = vec_add(vsumf0, vsumf2);
  8138. vsumf1 = vec_add(vsumf1, vsumf3);
  8139. vsumf0 = vec_add(vsumf0, vsumf1);
  8140. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  8141. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  8142. *s = 0.125f * vec_extract(vsumf0, 0);
  8143. #else
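// Scalar reference path: each uint16 of q2 yields an 8-byte grid row (low 9 bits) and a
// sign byte via ksigns_iq2xs (high 7 bits); the two 4-bit scales in each sc[] byte cover
// 16 weights each.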
  8144. float sumf = 0.f;
  8145. for (int i = 0; i < nb; ++i) {
  8146. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8147. const uint16_t * restrict q2 = x[i].qs;
  8148. const uint8_t * restrict sc = x[i].scales;
  8149. const int8_t * restrict q8 = y[i].qs;
  8150. int32_t bsum = 0;
  8151. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  8152. const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
  8153. const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
  8154. int32_t sumi = 0;
  8155. for (int l = 0; l < 2; ++l) {
  8156. const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
  8157. const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
  8158. for (int j = 0; j < 8; ++j) {
  8159. sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
  8160. }
  8161. q8 += 8;
  8162. }
  8163. bsum += sumi * ls1;
  8164. sumi = 0;
  8165. for (int l = 2; l < 4; ++l) {
  8166. const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
  8167. const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
  8168. for (int j = 0; j < 8; ++j) {
  8169. sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
  8170. }
  8171. q8 += 8;
  8172. }
  8173. bsum += sumi * ls2;
  8174. q2 += 4;
  8175. }
  8176. sumf += d * bsum;
  8177. }
  8178. *s = 0.125f * sumf;
  8179. #endif
  8180. }
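// Dot product of one iq2_s row with one q8_K row.
// As the scalar fallback shows, x[i].qs holds QK_K/8 index bytes followed by QK_K/8
// explicit sign bytes (one bit per weight); x[i].qh adds 2 high index bits per group of 8
// (four groups per qh byte) and x[i].scales packs two 4-bit scales per byte.
// Rough per-group decode (illustrative sketch only, these names are not from this file):
//   grid = iq2s_grid[ qs[l] | ((qh[ib32] << (8 - 2*l)) & 0x300) ];   // 10-bit index
//   w[j] = (sign_byte & (1 << j)) ? -grid_byte[j] : grid_byte[j];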
  8181. void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  8182. assert(n % QK_K == 0);
  8183. assert(nrc == 1);
  8184. UNUSED(nrc);
  8185. UNUSED(bx);
  8186. UNUSED(by);
  8187. UNUSED(bs);
  8188. const block_iq2_s * restrict x = vx;
  8189. const block_q8_K * restrict y = vy;
  8190. const int nb = n / QK_K;
  8191. #if defined(__ARM_NEON)
  8192. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8193. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8194. };
  8195. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  8196. const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1);
  8197. const uint8x16_t mask2 = vld1q_u8(k_mask2);
  8198. const uint8x16_t m1 = vdupq_n_u8(1);
  8199. const int32x4_t vzero = vdupq_n_s32(0);
  8200. uint8x16x2_t vs;
  8201. ggml_int8x16x4_t q2s;
  8202. ggml_int8x16x4_t q8b;
  8203. float sumf = 0;
  8204. for (int i = 0; i < nb; ++i) {
  8205. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8206. const uint8_t * restrict qs = x[i].qs;
  8207. const uint8_t * restrict qh = x[i].qh;
  8208. const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
  8209. const int8_t * restrict q8 = y[i].qs;
  8210. int sumi1 = 0, sumi2 = 0;
  8211. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8212. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8213. q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))),
  8214. vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300)))));
  8215. q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))),
  8216. vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300)))));
  8217. q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))),
  8218. vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300)))));
  8219. q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))),
  8220. vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300)))));
  8221. qs += 8;
  8222. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16)));
  8223. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  8224. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  8225. vs.val[0] = vceqq_u8(vs.val[0], mask2);
  8226. vs.val[1] = vceqq_u8(vs.val[1], mask2);
  8227. q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]);
  8228. q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]);
  8229. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
  8230. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  8231. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  8232. vs.val[0] = vceqq_u8(vs.val[0], mask2);
  8233. vs.val[1] = vceqq_u8(vs.val[1], mask2);
  8234. signs += 4;
  8235. q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]);
  8236. q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]);
  8237. const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]);
  8238. const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]);
  8239. const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]);
  8240. const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]);
  8241. sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf));
  8242. sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4));
  8243. sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf));
  8244. sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4));
  8245. }
  8246. sumf += d*(sumi1 + sumi2);
  8247. }
  8248. *s = 0.125f * sumf;
  8249. #elif defined(__AVX2__)
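// AVX2 path: the explicit sign words are broadcast, shuffled so every byte sees its own
// bit (k_mask1), tested against k_mask2 to get 0x00/0xFF per byte, and applied to q8 with
// xor + sub, i.e. a conditional two's-complement negation.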
  8250. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8251. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8252. };
  8253. static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  8254. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  8255. };
  8256. const __m128i m4 = _mm_set1_epi8(0xf);
  8257. const __m128i m1 = _mm_set1_epi8(1);
  8258. const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
  8259. const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
  8260. uint64_t aux64;
  8261. __m256 accumf = _mm256_setzero_ps();
  8262. for (int i = 0; i < nb; ++i) {
  8263. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8264. const uint8_t * restrict qs = x[i].qs;
  8265. const uint8_t * restrict qh = x[i].qh;
  8266. const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
  8267. const int8_t * restrict q8 = y[i].qs;
  8268. memcpy(&aux64, x[i].scales, 8);
  8269. const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1);
  8270. const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15
  8271. __m256i sumi1 = _mm256_setzero_si256();
  8272. __m256i sumi2 = _mm256_setzero_si256();
  8273. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8274. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8275. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8276. const __m256i q2_1 = _mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)],
  8277. iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)],
  8278. iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)],
  8279. iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]);
  8280. const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)],
  8281. iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)],
  8282. iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)],
  8283. iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]);
  8284. qs += 8;
  8285. __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16));
  8286. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  8287. const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2);
  8288. const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1);
  8289. aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16));
  8290. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  8291. const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2);
  8292. const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2);
  8293. signs += 4;
  8294. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1
  8295. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3
  8296. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0)));
  8297. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1)));
  8298. sumi1 = _mm256_add_epi32(sumi1, p1);
  8299. sumi2 = _mm256_add_epi32(sumi2, p2);
  8300. }
  8301. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  8302. }
  8303. *s = 0.125f * hsum_float_8(accumf);
  8304. #elif defined(__POWER9_VECTOR__)
  8305. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8306. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8307. };
  8308. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  8309. vector float vsumf0 = vec_splats(0.0f);
  8310. vector float vsumf1 = vec_splats(0.0f);
  8311. vector float vsumf2 = vec_splats(0.0f);
  8312. vector float vsumf3 = vec_splats(0.0f);
  8313. const vector unsigned char mask0 = vec_xl( 0, k_mask1);
  8314. const vector unsigned char mask1 = vec_xl(16, k_mask1);
  8315. const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2);
  8316. for (int i = 0; i < nb; ++i) {
  8317. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  8318. vector float vyd = vec_splats(y[i].d);
  8319. vector float vd = vec_mul(vxd, vyd);
  8320. vector signed int vsumi0 = vec_splats((int32_t)0);
  8321. vector signed int vsumi1 = vec_splats((int32_t)0);
  8322. vector signed int vsumi2 = vec_splats((int32_t)0);
  8323. vector signed int vsumi3 = vec_splats((int32_t)0);
  8324. vector signed int vsumi4 = vec_splats((int32_t)0);
  8325. vector signed int vsumi5 = vec_splats((int32_t)0);
  8326. vector signed int vsumi6 = vec_splats((int32_t)0);
  8327. vector signed int vsumi7 = vec_splats((int32_t)0);
  8328. const uint8_t * restrict q2 = x[i].qs;
  8329. const uint8_t * restrict qh = x[i].qh;
  8330. const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8);
  8331. const uint8_t * restrict sc = x[i].scales;
  8332. const int8_t * restrict q8 = y[i].qs;
  8333. for (int j = 0; j < QK_K/32; j += 2) {
  8334. __builtin_prefetch(q2, 0, 1);
  8335. __builtin_prefetch(q8, 0, 1);
  8336. vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))};
  8337. vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))};
  8338. vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))};
  8339. vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))};
  8340. q2 += 8;
  8341. qh += 2;
  8342. vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]);
  8343. vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]);
  8344. signs += 4;
  8345. vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0);
  8346. vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1);
  8347. vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0);
  8348. vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1);
  8349. vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2);
  8350. vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2);
  8351. vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2);
  8352. vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2);
  8353. vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0);
  8354. vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1);
  8355. vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2);
  8356. vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3);
  8357. vector signed char q8y0 = vec_xl( 0, q8);
  8358. vector signed char q8y1 = vec_xl(16, q8);
  8359. vector signed char q8y2 = vec_xl(32, q8);
  8360. vector signed char q8y3 = vec_xl(48, q8);
  8361. q8 += 64;
  8362. vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
  8363. vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
  8364. vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
  8365. vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
  8366. const uint16_t ls0 = (uint16_t)(sc[0] & 0xf);
  8367. const uint16_t ls1 = (uint16_t)(sc[0] >> 4);
  8368. const uint16_t ls2 = (uint16_t)(sc[1] & 0xf);
  8369. const uint16_t ls3 = (uint16_t)(sc[1] >> 4);
  8370. sc += 2;
  8371. vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1));
  8372. vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1));
  8373. vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1));
  8374. vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1));
  8375. vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0);
  8376. vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1);
  8377. vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2);
  8378. vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3);
  8379. vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4);
  8380. vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5);
  8381. vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6);
  8382. vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7);
  8383. }
  8384. vsumi0 = vec_add(vsumi0, vsumi4);
  8385. vsumi1 = vec_add(vsumi1, vsumi5);
  8386. vsumi2 = vec_add(vsumi2, vsumi6);
  8387. vsumi3 = vec_add(vsumi3, vsumi7);
  8388. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  8389. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  8390. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  8391. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  8392. }
  8393. vsumf0 = vec_add(vsumf0, vsumf2);
  8394. vsumf1 = vec_add(vsumf1, vsumf3);
  8395. vsumf0 = vec_add(vsumf0, vsumf1);
  8396. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  8397. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  8398. *s = 0.125f * vec_extract(vsumf0, 0);
  8399. #else
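// Scalar reference path, mirroring the layout described in the comment at the top of this
// function.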
  8400. float sumf = 0;
  8401. for (int i = 0; i < nb; i++) {
  8402. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8403. const int8_t * q8 = y[i].qs;
  8404. const uint8_t * qs = x[i].qs;
  8405. const uint8_t * qh = x[i].qh;
  8406. const uint8_t * signs = qs + QK_K/8;
  8407. int bsum = 0;
  8408. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  8409. int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
  8410. int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
  8411. int sumi1 = 0, sumi2 = 0;
  8412. for (int l = 0; l < 2; ++l) {
  8413. const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
  8414. for (int j = 0; j < 8; ++j) {
  8415. sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
  8416. }
  8417. q8 += 8;
  8418. }
  8419. for (int l = 2; l < 4; ++l) {
  8420. const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
  8421. for (int j = 0; j < 8; ++j) {
  8422. sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
  8423. }
  8424. q8 += 8;
  8425. }
  8426. bsum += ls1 * sumi1 + ls2 * sumi2;
  8427. qs += 4;
  8428. signs += 4;
  8429. }
  8430. sumf += d * bsum;
  8431. }
  8432. *s = 0.125f * sumf;
  8433. #endif
  8434. }
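// Dot product of one iq3_xxs row with one q8_K row.
// x[i].qs starts with QK_K/4 one-byte indices into iq3xxs_grid (each entry packs 4
// weights into a uint32); the following QK_K/8 bytes ("gas") hold one uint32 per 32
// weights with four 7-bit sign indices and a 4-bit scale in the top bits. The effective
// block scale is d * (2*scale4 + 1) with a constant 1/4 folded in at the end (the NEON
// path factors it as 0.5f * (0.5f + scale4)).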
  8435. void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  8436. assert(n % QK_K == 0);
  8437. assert(nrc == 1);
  8438. UNUSED(nrc);
  8439. UNUSED(bx);
  8440. UNUSED(by);
  8441. UNUSED(bs);
  8442. const block_iq3_xxs * restrict x = vx;
  8443. const block_q8_K * restrict y = vy;
  8444. const int nb = n / QK_K;
  8445. #if defined(__ARM_NEON)
  8446. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  8447. uint32_t aux32[2];
  8448. ggml_int8x16x4_t q3s;
  8449. ggml_int8x16x4_t q8b;
  8450. float sumf = 0;
  8451. for (int i = 0; i < nb; ++i) {
  8452. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8453. const uint8_t * restrict q3 = x[i].qs;
  8454. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  8455. const int8_t * restrict q8 = y[i].qs;
  8456. float sumf1 = 0, sumf2 = 0;
  8457. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8458. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8459. memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
  8460. const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]);
  8461. const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]);
  8462. const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]);
  8463. const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]);
  8464. q3 += 16;
  8465. q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127))));
  8466. q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
  8467. q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
  8468. q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
  8469. q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
  8470. q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
  8471. q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
  8472. q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
  8473. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  8474. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  8475. sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
  8476. sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
  8477. }
  8478. sumf += d*(sumf1 + sumf2);
  8479. }
  8480. *s = 0.5f * sumf;
  8481. #elif defined(__AVX2__)
  8482. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  8483. uint32_t aux32[2];
  8484. __m256 accumf = _mm256_setzero_ps();
  8485. for (int i = 0; i < nb; ++i) {
  8486. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8487. const uint8_t * restrict q3 = x[i].qs;
  8488. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  8489. const int8_t * restrict q8 = y[i].qs;
  8490. __m256i sumi1 = _mm256_setzero_si256();
  8491. __m256i sumi2 = _mm256_setzero_si256();
  8492. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8493. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8494. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8495. const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  8496. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  8497. q3 += 8;
  8498. const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]],
  8499. iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]);
  8500. q3 += 8;
  8501. memcpy(aux32, gas, 8); gas += 8;
  8502. const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127],
  8503. signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]);
  8504. const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
  8505. signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
  8506. const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
  8507. const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
  8508. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  8509. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  8510. const uint16_t ls1 = aux32[0] >> 28;
  8511. const uint16_t ls2 = aux32[1] >> 28;
  8512. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  8513. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  8514. sumi1 = _mm256_add_epi32(sumi1, p1);
  8515. sumi2 = _mm256_add_epi32(sumi2, p2);
  8516. }
  8517. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  8518. }
  8519. *s = 0.25f * hsum_float_8(accumf);
  8520. #elif defined(__POWER9_VECTOR__)
  8521. const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
  8522. vector float vsumf0 = vec_splats(0.0f);
  8523. vector float vsumf1 = vec_splats(0.0f);
  8524. vector float vsumf2 = vec_splats(0.0f);
  8525. vector float vsumf3 = vec_splats(0.0f);
  8526. for (int i = 0; i < nb; ++i) {
  8527. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  8528. vector float vyd = vec_splats(y[i].d);
  8529. vector float vd = vec_mul(vxd, vyd);
  8530. vector signed int vsumi0 = vec_splats((int32_t)0);
  8531. vector signed int vsumi1 = vec_splats((int32_t)0);
  8532. vector signed int vsumi2 = vec_splats((int32_t)0);
  8533. vector signed int vsumi3 = vec_splats((int32_t)0);
  8534. vector signed int vsumi4 = vec_splats((int32_t)0);
  8535. vector signed int vsumi5 = vec_splats((int32_t)0);
  8536. vector signed int vsumi6 = vec_splats((int32_t)0);
  8537. vector signed int vsumi7 = vec_splats((int32_t)0);
  8538. const uint8_t * restrict q3 = x[i].qs;
  8539. const uint32_t * restrict signs = (const uint32_t *)(x[i].qs + QK_K/4);
  8540. const int8_t * restrict q8 = y[i].qs;
  8541. #pragma GCC unroll 1
  8542. for (int j = 0; j < QK_K/32; j += 2) {
  8543. __builtin_prefetch(q3, 0, 1);
  8544. __builtin_prefetch(q8, 0, 1);
  8545. vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]};
  8546. vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]};
  8547. vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]};
  8548. vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]};
  8549. q3 += 16;
  8550. vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])};
  8551. vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])};
  8552. vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])};
  8553. vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])};
  8554. vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0);
  8555. vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1);
  8556. vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2);
  8557. vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3);
  8558. vector signed char q8y0 = vec_xl( 0, q8);
  8559. vector signed char q8y1 = vec_xl(16, q8);
  8560. vector signed char q8y2 = vec_xl(32, q8);
  8561. vector signed char q8y3 = vec_xl(48, q8);
  8562. q8 += 64;
  8563. vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0));
  8564. vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1));
  8565. vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2));
  8566. vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3));
  8567. const uint16_t ls0 = (uint16_t)(signs[0] >> 28);
  8568. const uint16_t ls1 = (uint16_t)(signs[1] >> 28);
  8569. signs += 2;
  8570. vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
  8571. vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
  8572. vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
  8573. vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
  8574. vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
  8575. vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
  8576. vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
  8577. vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
  8578. vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
  8579. vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
  8580. }
  8581. vsumi0 = vec_add(vsumi0, vsumi4);
  8582. vsumi1 = vec_add(vsumi1, vsumi5);
  8583. vsumi2 = vec_add(vsumi2, vsumi6);
  8584. vsumi3 = vec_add(vsumi3, vsumi7);
  8585. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  8586. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  8587. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  8588. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  8589. }
  8590. vsumf0 = vec_add(vsumf0, vsumf2);
  8591. vsumf1 = vec_add(vsumf1, vsumf3);
  8592. vsumf0 = vec_add(vsumf0, vsumf1);
  8593. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  8594. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  8595. *s = 0.25f * vec_extract(vsumf0, 0);
  8596. #else
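// Scalar reference path: two grid entries (4 weights each) share one 7-bit sign index,
// with signs taken from ksigns_iq2xs as in the iq2 kernels.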
  8597. uint32_t aux32;
  8598. float sumf = 0.f;
  8599. for (int i = 0; i < nb; ++i) {
  8600. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8601. const uint8_t * restrict q3 = x[i].qs;
  8602. const uint8_t * restrict gas = x[i].qs + QK_K/4;
  8603. const int8_t * restrict q8 = y[i].qs;
  8604. int32_t bsum = 0;
  8605. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  8606. memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
  8607. const uint32_t ls = 2*(aux32 >> 28) + 1;
  8608. int32_t sumi = 0;
  8609. for (int l = 0; l < 4; ++l) {
  8610. const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
  8611. const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
  8612. const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
  8613. for (int j = 0; j < 4; ++j) {
  8614. sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
  8615. sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
  8616. }
  8617. q8 += 8;
  8618. }
  8619. q3 += 8;
  8620. bsum += sumi * ls;
  8621. }
  8622. sumf += d * bsum;
  8623. }
  8624. *s = 0.25f * sumf;
  8625. #endif
  8626. }
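// Dot product of one iq3_s row with one q8_K row.
// x[i].qs holds one index byte per 4 weights, x[i].qh contributes one extra high bit per
// index (bit k of a qh byte extends index k of the corresponding 32-weight sub-block),
// x[i].signs stores explicit sign bits, and x[i].scales packs two 4-bit scales per byte
// (one per 32 weights). Unlike the *_xxs/_xs kernels, no extra constant factor is applied
// to the final sum.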
  8627. void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  8628. assert(n % QK_K == 0);
  8629. assert(nrc == 1);
  8630. UNUSED(nrc);
  8631. UNUSED(bx);
  8632. UNUSED(by);
  8633. UNUSED(bs);
  8634. const block_iq3_s * restrict x = vx;
  8635. const block_q8_K * restrict y = vy;
  8636. const int nb = n / QK_K;
  8637. #if defined(__ARM_NEON)
  8638. typedef union {
  8639. uint16x8_t vec_index;
  8640. uint16_t index[8];
  8641. } vec_index_t;
  8642. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8643. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8644. };
  8645. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  8646. static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1};
  8647. const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1);
  8648. const uint8x16_t mask2 = vld1q_u8(k_mask2);
  8649. const int16x8_t hshift = vld1q_s16(k_shift);
  8650. const uint16x8_t m256 = vdupq_n_u16(256);
  8651. const uint8x16_t m1 = vdupq_n_u8(1);
  8652. uint8x16x2_t vs;
  8653. ggml_int8x16x4_t q3s;
  8654. ggml_int8x16x4_t q8b;
  8655. vec_index_t idx;
  8656. #if QK_K == 256
  8657. uint32_t scales32[2];
  8658. const uint8_t * scales8 = (const uint8_t *)scales32;
  8659. #endif
  8660. float sumf = 0;
  8661. for (int i = 0; i < nb; ++i) {
  8662. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8663. const uint8_t * restrict qs = x[i].qs;
  8664. const uint8_t * restrict qh = x[i].qh;
  8665. const uint16_t * restrict signs = (const uint16_t *)x[i].signs;
  8666. const int8_t * restrict q8 = y[i].qs;
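// For QK_K == 256 the eight 4-bit scales are expanded once per super-block: low nibbles
// into scales32[0], high nibbles into scales32[1], each byte becoming 2*s+1, so the inner
// loop reads the per-32-weight scale as a plain byte (scales8[ib32/2] / scales8[ib32/2+4]).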
  8667. #if QK_K == 256
  8668. memcpy(scales32, x[i].scales, 4);
  8669. scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101;
  8670. scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101;
  8671. #endif
  8672. int sumi1 = 0, sumi2 = 0;
  8673. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8674. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8675. const uint8x16_t idx_l = vld1q_u8(qs); qs += 16;
  8676. idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256));
  8677. const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]],
  8678. iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]);
  8679. const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]],
  8680. iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]);
  8681. idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256));
  8682. const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]],
  8683. iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]);
  8684. const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]],
  8685. iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]);
  8686. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16)));
  8687. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  8688. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  8689. vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
  8690. vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
  8691. q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0));
  8692. q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1));
  8693. vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
  8694. vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
  8695. vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
  8696. vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
  8697. vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
  8698. signs += 4;
  8699. q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2));
  8700. q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3));
  8701. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
  8702. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
  8703. #if QK_K == 256
  8704. sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0];
  8705. sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4];
  8706. #else
  8707. sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32/2] & 0xf));
  8708. sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32/2] >> 4));
  8709. #endif
  8710. }
  8711. sumf += d*(sumi1 + sumi2);
  8712. }
  8713. *s = sumf;
  8714. #elif defined(__AVX2__)
  8715. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8716. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8717. };
  8718. static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  8719. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
  8720. };
  8721. const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
  8722. const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
  8723. const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8);
  8724. const __m256i idx_mask = _mm256_set1_epi32(256);
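// Build the 9-bit grid indices: the widened qs bytes give bits 0..7 and qh[ib32] supplies
// bit 8; a per-lane variable shift (idx_shift = 8..1) masked with 256 routes bit k of the
// qh byte to lane k.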
  8725. typedef union {
  8726. __m256i vec[2];
  8727. uint32_t index[16];
  8728. } index_t;
  8729. index_t idx;
  8730. __m256 accumf = _mm256_setzero_ps();
  8731. for (int i = 0; i < nb; ++i) {
  8732. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8733. const uint8_t * restrict qs = x[i].qs;
  8734. const uint8_t * restrict qh = x[i].qh;
  8735. const uint16_t * restrict signs = (const uint16_t *)x[i].signs;
  8736. const int8_t * restrict q8 = y[i].qs;
  8737. __m256i sumi1 = _mm256_setzero_si256();
  8738. __m256i sumi2 = _mm256_setzero_si256();
  8739. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8740. const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8741. const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  8742. const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16;
  8743. idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]);
  8744. idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]);
  8745. idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask);
  8746. idx.vec[1] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask);
  8747. idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l)));
  8748. idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1)));
8749. // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange.
  8750. //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4);
  8751. //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4);
  8752. const __m256i q2_1 = _mm256_set_epi32(
  8753. iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]],
  8754. iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]
  8755. );
  8756. const __m256i q2_2 = _mm256_set_epi32(
  8757. iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]],
  8758. iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]]
  8759. );
  8760. __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16));
  8761. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  8762. const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2);
  8763. const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1);
  8764. aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16));
  8765. aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2);
  8766. const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2);
  8767. const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2);
  8768. signs += 4;
  8769. const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
  8770. const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
  8771. const uint16_t ls1 = x[i].scales[ib32/2] & 0xf;
  8772. const uint16_t ls2 = x[i].scales[ib32/2] >> 4;
  8773. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
  8774. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
  8775. sumi1 = _mm256_add_epi32(sumi1, p1);
  8776. sumi2 = _mm256_add_epi32(sumi2, p2);
  8777. }
  8778. accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
  8779. }
  8780. *s = hsum_float_8(accumf);
  8781. #elif defined(__POWER9_VECTOR__)
  8782. static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
  8783. 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
  8784. };
  8785. static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
  8786. vector float vsumf0 = vec_splats(0.0f);
  8787. vector float vsumf1 = vec_splats(0.0f);
  8788. vector float vsumf2 = vec_splats(0.0f);
  8789. vector float vsumf3 = vec_splats(0.0f);
  8790. const vector unsigned char mask0 = vec_xl( 0, k_mask1);
  8791. const vector unsigned char mask1 = vec_xl(16, k_mask1);
  8792. const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2);
  8793. for (int i = 0; i < nb; ++i) {
  8794. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  8795. vector float vyd = vec_splats(y[i].d);
  8796. vector float vd = vec_mul(vxd, vyd);
  8797. const uint8_t * restrict q3 = x[i].qs;
  8798. const uint8_t * restrict qh = x[i].qh;
  8799. const uint16_t * restrict signs = (const uint16_t *)(x[i].signs);
  8800. const uint8_t * restrict sc = x[i].scales;
  8801. const int8_t * restrict q8 = y[i].qs;
  8802. vector signed int vsumi0 = vec_splats((int32_t)0);
  8803. vector signed int vsumi1 = vec_splats((int32_t)0);
  8804. vector signed int vsumi2 = vec_splats((int32_t)0);
  8805. vector signed int vsumi3 = vec_splats((int32_t)0);
  8806. vector signed int vsumi4 = vec_splats((int32_t)0);
  8807. vector signed int vsumi5 = vec_splats((int32_t)0);
  8808. vector signed int vsumi6 = vec_splats((int32_t)0);
  8809. vector signed int vsumi7 = vec_splats((int32_t)0);
  8810. for (int j = 0; j < QK_K/32; j += 2) {
  8811. __builtin_prefetch(q3, 0, 1);
  8812. __builtin_prefetch(q8, 0, 1);
  8813. vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)],
  8814. iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]};
  8815. vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)],
  8816. iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]};
  8817. vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)],
  8818. iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]};
  8819. vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)],
  8820. iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]};
  8821. q3 += 16;
  8822. qh += 2;
  8823. vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]);
  8824. vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]);
  8825. signs += 4;
  8826. vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0);
  8827. vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1);
  8828. vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0);
  8829. vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1);
  8830. vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2);
  8831. vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2);
  8832. vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2);
  8833. vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2);
  8834. vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0);
  8835. vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1);
  8836. vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2);
  8837. vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3);
  8838. vector signed char q8y0 = vec_xl( 0, q8);
  8839. vector signed char q8y1 = vec_xl(16, q8);
  8840. vector signed char q8y2 = vec_xl(32, q8);
  8841. vector signed char q8y3 = vec_xl(48, q8);
  8842. q8 += 64;
  8843. vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0));
  8844. vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1));
  8845. vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2));
  8846. vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3));
  8847. const uint16_t ls0 = (uint16_t)(sc[0] & 0xf);
  8848. const uint16_t ls1 = (uint16_t)(sc[0] >> 4);
  8849. sc ++;
  8850. vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
  8851. vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
  8852. vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
  8853. vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
  8854. vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
  8855. vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
  8856. vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
  8857. vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
  8858. vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
  8859. vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
  8860. }
  8861. vsumi0 = vec_add(vsumi0, vsumi4);
  8862. vsumi1 = vec_add(vsumi1, vsumi5);
  8863. vsumi2 = vec_add(vsumi2, vsumi6);
  8864. vsumi3 = vec_add(vsumi3, vsumi7);
  8865. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  8866. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  8867. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  8868. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  8869. }
  8870. vsumf0 = vec_add(vsumf0, vsumf2);
  8871. vsumf1 = vec_add(vsumf1, vsumf3);
  8872. vsumf0 = vec_add(vsumf0, vsumf1);
  8873. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  8874. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  8875. *s = vec_extract(vsumf0, 0);
  8876. #else
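// scalar reference path: decode each group of 8 values through iq3s_grid, apply the
// per-value signs from the packed sign bytes, and weight each half sub-block by its
// (2*s + 1) scale.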
  8877. float sumf = 0.f;
  8878. for (int i = 0; i < nb; ++i) {
  8879. const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
  8880. const uint8_t * restrict qs = x[i].qs;
  8881. const uint8_t * restrict qh = x[i].qh;
  8882. const uint8_t * restrict signs = x[i].signs;
  8883. const int8_t * restrict q8 = y[i].qs;
  8884. int32_t bsum = 0;
  8885. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  8886. const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
  8887. const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
  8888. int32_t sumi = 0;
  8889. for (int l = 0; l < 4; ++l) {
  8890. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
  8891. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
  8892. for (int j = 0; j < 4; ++j) {
  8893. sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
  8894. sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
  8895. }
  8896. q8 += 8;
  8897. }
  8898. qs += 8;
  8899. signs += 4;
  8900. bsum += sumi * ls1;
  8901. sumi = 0;
  8902. for (int l = 0; l < 4; ++l) {
  8903. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
  8904. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
  8905. for (int j = 0; j < 4; ++j) {
  8906. sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
  8907. sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
  8908. }
  8909. q8 += 8;
  8910. }
  8911. qs += 8;
  8912. signs += 4;
  8913. bsum += sumi * ls2;
  8914. }
  8915. sumf += d * bsum;
  8916. }
  8917. *s = sumf;
  8918. #endif
  8919. }
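// multiply signed int8 vectors and horizontally add adjacent products into int16 lanes.
// _mm256_maddubs_epi16 requires an unsigned first operand, so the sign of x is moved onto
// y first (ax = |x|, sy = y with x's sign), giving x[2k]*y[2k] + x[2k+1]*y[2k+1] per lane.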
  8920. #ifdef __AVX2__
  8921. static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) {
  8922. const __m256i ax = _mm256_sign_epi8(x, x);
  8923. const __m256i sy = _mm256_sign_epi8(y, x);
  8924. return _mm256_maddubs_epi16(ax, sy);
  8925. }
  8926. #endif
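// IQ1_S dot product with q8_K. per 32-value sub-block: 4 groups of 8 values are looked up
// in iq1s_grid using 8 low index bits from qs and 3 high bits from qh; qh bits 12..14 give
// a 3-bit scale (applied as 2*s + 1) and bit 15 the sign of the per-block IQ1S_DELTA shift,
// which is folded in through the q8 block sums (bsums).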
  8927. void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  8928. assert(n % QK_K == 0);
  8929. assert(nrc == 1);
  8930. UNUSED(nrc);
  8931. UNUSED(bx);
  8932. UNUSED(by);
  8933. UNUSED(bs);
  8934. const block_iq1_s * restrict x = vx;
  8935. const block_q8_K * restrict y = vy;
  8936. const int nb = n / QK_K;
  8937. #if defined __ARM_NEON
  8938. ggml_int8x16x4_t q1b;
  8939. ggml_int8x16x4_t q8b;
  8940. float sumf = 0;
  8941. for (int i = 0; i < nb; ++i) {
  8942. const int8_t * q8 = y[i].qs;
  8943. const uint8_t * qs = x[i].qs;
  8944. const uint16_t * qh = x[i].qh;
  8945. int sumi1 = 0, sumi2 = 0, sumi3 = 0;
  8946. for (int ib = 0; ib < QK_K/32; ib += 2) {
  8947. q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))),
  8948. vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700)))));
  8949. q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))),
  8950. vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700)))));
  8951. q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))),
  8952. vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700)))));
  8953. q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))),
  8954. vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700)))));
  8955. qs += 8;
  8956. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  8957. const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]);
  8958. const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]);
  8959. const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1;
  8960. const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1;
  8961. sumi1 += vaddvq_s32(p1) * ls1;
  8962. sumi2 += vaddvq_s32(p2) * ls2;
  8963. sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1)
  8964. + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? -1 : 1);
  8965. }
  8966. sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3);
  8967. }
  8968. *s = sumf;
  8969. #elif defined __AVX2__
  8970. __m256 accum = _mm256_setzero_ps();
  8971. float accum1 = 0;
  8972. for (int i = 0; i < nb; ++i) {
  8973. const int8_t * q8 = y[i].qs;
  8974. const uint8_t * qs = x[i].qs;
  8975. const uint16_t * qh = x[i].qh;
  8976. __m256i sumi = _mm256_setzero_si256();
  8977. int sumi1 = 0;
  8978. for (int ib = 0; ib < QK_K/32; ib += 2) {
  8979. const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)],
  8980. iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]);
  8981. const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)],
  8982. iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]);
  8983. qs += 8;
  8984. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  8985. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  8986. const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1);
  8987. const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2);
  8988. const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1;
  8989. const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1;
  8990. const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1));
  8991. const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2));
  8992. sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2));
  8993. sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1
  8994. + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2;
  8995. }
  8996. const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
  8997. accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum);
  8998. accum1 += d * sumi1;
  8999. }
  9000. *s = hsum_float_8(accum) + IQ1S_DELTA * accum1;
  9001. #elif defined(__POWER9_VECTOR__)
  9002. const vector unsigned char v0 = vec_splats((unsigned char)0x0);
  9003. const vector unsigned short vsign = vec_splats((unsigned short)0x8000);
  9004. vector float vsumf0 = vec_splats(0.0f);
  9005. vector float vsumf1 = vec_splats(0.0f);
  9006. vector float vsumf2 = vec_splats(0.0f);
  9007. vector float vsumf3 = vec_splats(0.0f);
  9008. for (int i = 0; i < nb; ++i) {
  9009. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
  9010. vector float vyd = vec_splats(y[i].d);
  9011. vector float vd = vec_mul(vxd, vyd);
  9012. vector signed int vsumi0 = vec_splats((int32_t)0);
  9013. vector signed int vsumi1 = vec_splats((int32_t)0);
  9014. vector signed int vsumi2 = vec_splats((int32_t)0);
  9015. vector signed int vsumi3 = vec_splats((int32_t)0);
  9016. vector signed int vsumi4 = vec_splats((int32_t)0);
  9017. vector signed int vsumi5 = vec_splats((int32_t)0);
  9018. vector signed int vsumi6 = vec_splats((int32_t)0);
  9019. vector signed int vsumi7 = vec_splats((int32_t)0);
  9020. vector signed int vsumi8 = vec_splats((int32_t)0);
  9021. const uint8_t * restrict q1 = x[i].qs;
  9022. const uint16_t * restrict qh = x[i].qh;
  9023. const int8_t * restrict q8 = y[i].qs;
  9024. const int16_t * restrict qs = y[i].bsums;
  9025. for (int j = 0; j < QK_K/32; j += 2) {
  9026. __builtin_prefetch(q1, 0, 1);
  9027. __builtin_prefetch(qh, 0, 1);
  9028. __builtin_prefetch(q8, 0, 1);
  9029. vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))};
  9030. vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))};
  9031. vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))};
  9032. vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))};
  9033. q1 += 8;
  9034. vector signed char q1x0 = (vector signed char)aux64x2_0;
  9035. vector signed char q1x1 = (vector signed char)aux64x2_1;
  9036. vector signed char q1x2 = (vector signed char)aux64x2_2;
  9037. vector signed char q1x3 = (vector signed char)aux64x2_3;
  9038. vector signed char q8y0 = vec_xl( 0, q8);
  9039. vector signed char q8y1 = vec_xl(16, q8);
  9040. vector signed char q8y2 = vec_xl(32, q8);
  9041. vector signed char q8y3 = vec_xl(48, q8);
  9042. q8 += 64;
  9043. vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0));
  9044. vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1));
  9045. vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2));
  9046. vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3));
  9047. const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7);
  9048. const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7);
  9049. vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
  9050. vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
  9051. vector signed short vscales = vec_sld(vscales23, vscales01, 8);
  9052. vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
  9053. vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
  9054. vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
  9055. vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
  9056. vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
  9057. vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
  9058. vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
  9059. vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
  9060. vector signed short q8ysums = vec_xl_len(qs, 8);
  9061. qs += 4;
  9062. q8ysums = vec_mergeh(q8ysums, (vector signed short)v0);
  9063. vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8);
  9064. qh += 2;
  9065. vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0);
  9066. vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel);
  9067. vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8);
  9068. }
  9069. vsumi0 = vec_add(vsumi0, vsumi4);
  9070. vsumi1 = vec_add(vsumi1, vsumi5);
  9071. vsumi2 = vec_add(vsumi2, vsumi6);
  9072. vsumi3 = vec_add(vsumi3, vsumi7);
  9073. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  9074. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  9075. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  9076. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  9077. vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0);
  9078. }
  9079. vsumf0 = vec_add(vsumf0, vsumf2);
  9080. vsumf1 = vec_add(vsumf1, vsumf3);
  9081. vsumf0 = vec_add(vsumf0, vsumf1);
  9082. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  9083. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  9084. *s = vec_extract(vsumf0, 0);
  9085. #else
  9086. float sumf = 0;
  9087. for (int i = 0; i < nb; i++) {
  9088. const int8_t * q8 = y[i].qs;
  9089. const uint8_t * qs = x[i].qs;
  9090. const uint16_t * qh = x[i].qh;
  9091. int sumi = 0, sumi1 = 0;
  9092. for (int ib = 0; ib < QK_K/32; ++ib) {
  9093. const int ls = 2*((qh[ib] >> 12) & 7) + 1;
  9094. const int delta = qh[ib] & 0x8000 ? -1 : 1;
  9095. int lsum = 0;
  9096. for (int l = 0; l < 4; ++l) {
  9097. const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
  9098. for (int j = 0; j < 8; ++j) {
  9099. lsum += q8[j] * grid[j];
  9100. }
  9101. q8 += 8;
  9102. }
  9103. sumi += ls * lsum;
  9104. sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
  9105. qs += 4;
  9106. }
  9107. sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
  9108. }
  9109. *s = sumf;
  9110. #endif
  9111. }
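// IQ1_M dot product with q8_K. as for IQ1_S, values are 8-wide iq1s_grid entries, but the
// high index bits and the per-group delta signs (bits 0x08/0x80 of each qh byte) come from
// qh nibbles, 3-bit sub-block scales are packed in x[i].scales, and for QK_K != 64 the
// overall block scale is reassembled into iq1m_scale_t from the top nibbles of the four
// scale words.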
  9112. void ggml_vec_dot_iq1_m_q8_K (int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  9113. assert(n % QK_K == 0);
  9114. assert(nrc == 1);
  9115. UNUSED(nrc);
  9116. UNUSED(bx);
  9117. UNUSED(by);
  9118. UNUSED(bs);
  9119. const block_iq1_m * restrict x = vx;
  9120. const block_q8_K * restrict y = vy;
  9121. const int nb = n / QK_K;
  9122. #if QK_K != 64
  9123. iq1m_scale_t scale;
  9124. #endif
  9125. #if defined __ARM_NEON
  9126. #if QK_K == 64
  9127. const int32x4_t mask = vdupq_n_s32(0xf);
  9128. #else
  9129. const int32x4_t mask = vdupq_n_s32(0x7);
  9130. #endif
  9131. const int32x4_t mone = vdupq_n_s32(1);
  9132. const int32x4_t mzero = vdupq_n_s32(0);
  9133. ggml_int8x16x4_t deltas;
  9134. deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1));
  9135. deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1));
  9136. deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1));
  9137. deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1));
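// deltas.val[i] holds the +1/-1 pattern for two groups of 8, selected by the two delta
// bits of one qh nibble; aux8 below indexes into it after those bits are gathered from qh
// with shifts and masks.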
  9138. ggml_int8x16x4_t q1b;
  9139. ggml_int8x16x4_t q8b;
  9140. uint32_t aux32;
  9141. const uint8_t * aux8 = (const uint8_t *)&aux32;
  9142. float sumf = 0;
  9143. for (int i = 0; i < nb; ++i) {
  9144. const int8_t * q8 = y[i].qs;
  9145. const uint8_t * qs = x[i].qs;
  9146. const uint8_t * qh = x[i].qh;
  9147. const uint16_t * sc = (const uint16_t *)x[i].scales;
  9148. #if QK_K != 64
  9149. scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  9150. #endif
  9151. int32x4_t sumi1 = mzero;
  9152. int32x4_t sumi2 = mzero;
  9153. for (int ib = 0; ib < QK_K/32; ib += 2) {
  9154. q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))),
  9155. vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700)))));
  9156. q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))),
  9157. vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700)))));
  9158. q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))),
  9159. vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700)))));
  9160. q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))),
  9161. vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700)))));
  9162. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  9163. const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1]));
  9164. const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3]));
  9165. const int32x4_t p12 = vpaddq_s32(p1, p2);
9166. const uint32_t * qh32 = (const uint32_t *)qh; // qh is 4-byte aligned, so this cast is safe
  9167. aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202);
  9168. const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1]));
  9169. const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3]));
  9170. const int32x4_t p34 = vpaddq_s32(p3, p4);
  9171. #if QK_K == 64
  9172. int32x4_t scales_4 = ggml_vld1q_u32(sc[0] >> 0, sc[0] >> 4, sc[0] >> 8, sc[0] >> 12);
  9173. #else
  9174. int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9);
  9175. #endif
  9176. scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone);
  9177. sumi1 = vmlaq_s32(sumi1, scales_4, p12);
  9178. sumi2 = vmlaq_s32(sumi2, scales_4, p34);
  9179. qs += 8; qh += 4;
  9180. }
  9181. #if QK_K == 64
  9182. sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2));
  9183. #else
  9184. sumf += y[i].d * GGML_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2));
  9185. #endif
  9186. }
  9187. *s = sumf;
  9188. #elif defined __AVX2__
  9189. #if QK_K == 64
  9190. const __m256i mask = _mm256_set1_epi16(0xf);
  9191. #else
  9192. const __m256i mask = _mm256_set1_epi16(0x7);
  9193. #endif
  9194. const __m256i mone = _mm256_set1_epi16(1);
  9195. __m256 accum1 = _mm256_setzero_ps();
  9196. __m256 accum2 = _mm256_setzero_ps();
  9197. for (int i = 0; i < nb; ++i) {
  9198. const int8_t * q8 = y[i].qs;
  9199. const uint8_t * qs = x[i].qs;
  9200. const uint8_t * qh = x[i].qh;
  9201. const uint16_t * sc = (const uint16_t *)x[i].scales;
  9202. #if QK_K != 64
  9203. scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  9204. #endif
  9205. __m256i sumi1 = _mm256_setzero_si256();
  9206. __m256i sumi2 = _mm256_setzero_si256();
  9207. for (int ib = 0; ib < QK_K/32; ib += 2) {
  9208. const __m256i q1b_1 = _mm256_set_epi64x(
  9209. iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)],
  9210. iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]
  9211. );
  9212. const __m256i q1b_2 = _mm256_set_epi64x(
  9213. iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)],
  9214. iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]
  9215. );
  9216. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  9217. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
  9218. const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1);
  9219. const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2);
  9220. const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101,
  9221. qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101,
  9222. qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101,
  9223. qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101);
  9224. const __m256i delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101,
  9225. qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101,
  9226. qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101,
  9227. qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101);
  9228. const __m256i dot3 = mul_add_epi8(delta1, q8b_1);
  9229. const __m256i dot4 = mul_add_epi8(delta2, q8b_2);
  9230. #if QK_K == 64
  9231. __m256i scale1 = MM256_SET_M128I(_mm_set1_epi16(sc[0] >> 4), _mm_set1_epi16(sc[0] >> 0));
  9232. __m256i scale2 = MM256_SET_M128I(_mm_set1_epi16(sc[0] >> 12), _mm_set1_epi16(sc[0] >> 8));
  9233. #else
  9234. __m256i scale1 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 3), _mm_set1_epi16(sc[ib/2] >> 0));
  9235. __m256i scale2 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 9), _mm_set1_epi16(sc[ib/2] >> 6));
  9236. #endif
  9237. scale1 = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scale1, mask), 1), mone);
  9238. scale2 = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scale2, mask), 1), mone);
  9239. const __m256i p1 = _mm256_madd_epi16(dot1, scale1);
  9240. const __m256i p2 = _mm256_madd_epi16(dot2, scale2);
  9241. const __m256i p3 = _mm256_madd_epi16(dot3, scale1);
  9242. const __m256i p4 = _mm256_madd_epi16(dot4, scale2);
  9243. sumi1 = _mm256_add_epi32(sumi1, _mm256_add_epi32(p1, p2));
  9244. sumi2 = _mm256_add_epi32(sumi2, _mm256_add_epi32(p3, p4));
  9245. qs += 8; qh += 4;
  9246. }
  9247. #if QK_K == 64
  9248. const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d));
  9249. #else
  9250. const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16));
  9251. #endif
  9252. accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1);
  9253. accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2);
  9254. }
  9255. *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2);
  9256. #else
  9257. int sum1[2], sum2[2], delta[4];
  9258. float sumf = 0;
  9259. for (int i = 0; i < nb; i++) {
  9260. const int8_t * q8 = y[i].qs;
  9261. const uint8_t * qs = x[i].qs;
  9262. const uint8_t * qh = x[i].qh;
  9263. const uint16_t * sc = (const uint16_t *)x[i].scales;
  9264. #if QK_K != 64
  9265. scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  9266. #endif
  9267. int sumi1 = 0, sumi2 = 0;
  9268. for (int ib = 0; ib < QK_K/32; ++ib) {
  9269. delta[0] = qh[0] & 0x08 ? -1 : 1;
  9270. delta[1] = qh[0] & 0x80 ? -1 : 1;
  9271. delta[2] = qh[1] & 0x08 ? -1 : 1;
  9272. delta[3] = qh[1] & 0x80 ? -1 : 1;
  9273. sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0;
  9274. for (int l = 0; l < 4; ++l) {
  9275. const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700)));
  9276. int lsum1 = 0, lsum2 = 0;
  9277. for (int j = 0; j < 8; ++j) {
  9278. lsum1 += q8[j] * grid[j];
  9279. lsum2 += q8[j];
  9280. }
  9281. q8 += 8;
  9282. sum1[l/2] += lsum1;
  9283. sum2[l/2] += lsum2*delta[l];
  9284. }
  9285. #if QK_K == 64
  9286. const int ls1 = 2*((sc[0] >> (8*(ib%2)+0)) & 0xf) + 1;
  9287. const int ls2 = 2*((sc[0] >> (8*(ib%2)+4)) & 0xf) + 1;
  9288. #else
  9289. const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1;
  9290. const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1;
  9291. #endif
  9292. sumi1 += sum1[0] * ls1 + sum1[1] * ls2;
  9293. sumi2 += sum2[0] * ls1 + sum2[1] * ls2;
  9294. qs += 4;
  9295. qh += 2;
  9296. }
  9297. #if QK_K == 64
  9298. sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2);
  9299. #else
  9300. sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2);
  9301. #endif
  9302. }
  9303. *s = sumf;
  9304. #endif
  9305. }
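// IQ4_NL dot product with q8_0: each 4-bit quant indexes the non-linear kvalues_iq4nl
// table; blocks carry a single fp16 scale, so the kernel is a table lookup followed by an
// int8 dot product scaled by d_x * d_y.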
  9306. void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  9307. assert(nrc == 1);
  9308. UNUSED(nrc);
  9309. UNUSED(bx);
  9310. UNUSED(by);
  9311. UNUSED(bs);
  9312. assert(n % QK4_NL == 0);
  9313. static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");
  9314. const block_iq4_nl * restrict x = vx;
  9315. const block_q8_0 * restrict y = vy;
  9316. const int nb = n / QK4_NL;
  9317. #if defined __ARM_NEON
  9318. const int8x16_t values = vld1q_s8(kvalues_iq4nl);
  9319. const uint8x16_t m4b = vdupq_n_u8(0x0f);
  9320. uint8x16x2_t q4bits;
  9321. int8x16x4_t q4b;
  9322. int8x16x4_t q8b;
  9323. int32x4_t prod_1, prod_2;
  9324. float sumf = 0;
  9325. for (int ib = 0; ib < nb; ib += 2) {
  9326. q4bits.val[0] = vld1q_u8(x[ib+0].qs);
  9327. q4bits.val[1] = vld1q_u8(x[ib+1].qs);
  9328. q8b.val[0] = vld1q_s8(y[ib+0].qs);
  9329. q8b.val[1] = vld1q_s8(y[ib+0].qs + 16);
  9330. q8b.val[2] = vld1q_s8(y[ib+1].qs);
  9331. q8b.val[3] = vld1q_s8(y[ib+1].qs + 16);
  9332. q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
  9333. q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
  9334. q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
  9335. q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
  9336. prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
  9337. prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
  9338. sumf +=
  9339. GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib+0].d) * vaddvq_s32(prod_1) +
  9340. GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib+1].d) * vaddvq_s32(prod_2);
  9341. }
  9342. *s = sumf;
  9343. #elif defined __AVX2__
  9344. const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl);
  9345. const __m128i m4b = _mm_set1_epi8(0x0f);
  9346. const __m256i mone = _mm256_set1_epi16(1);
  9347. __m256 accum1 = _mm256_setzero_ps();
  9348. __m256 accum2 = _mm256_setzero_ps();
  9349. for (int ib = 0; ib < nb; ib += 2) {
  9350. const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[0].qs);
  9351. const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[1].qs);
  9352. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[0].qs);
  9353. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[1].qs);
  9354. const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)),
  9355. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)));
  9356. const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)),
  9357. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)));
  9358. const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1);
  9359. const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2);
  9360. const __m256i p_1 = _mm256_madd_epi16(p16_1, mone);
  9361. const __m256i p_2 = _mm256_madd_epi16(p16_2, mone);
  9362. accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[0].d)*GGML_FP16_TO_FP32(x[0].d)),
  9363. _mm256_cvtepi32_ps(p_1), accum1);
  9364. accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[1].d)*GGML_FP16_TO_FP32(x[1].d)),
  9365. _mm256_cvtepi32_ps(p_2), accum2);
  9366. y += 2;
  9367. x += 2;
  9368. }
  9369. *s = hsum_float_8(_mm256_add_ps(accum1, accum2));
  9370. #elif defined(__POWER9_VECTOR__)
  9371. const vector signed char lowMask = vec_splats((signed char)0xF);
  9372. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  9373. vector float vsumf0 = vec_splats(0.0f);
  9374. vector float vsumf1 = vec_splats(0.0f);
  9375. const vector signed char values = vec_xl( 0, kvalues_iq4nl);
  9376. #pragma GCC unroll 4
  9377. for (int ib = 0; ib < nb; ++ib) {
  9378. __builtin_prefetch(x[ib].qs, 0, 1);
  9379. __builtin_prefetch(y[ib].qs, 0, 1);
  9380. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d));
  9381. vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d));
  9382. vector float vd = vec_mul(vxd, vyd);
  9383. vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
  9384. vector signed char q4x0 = vec_and(qxs, lowMask);
  9385. vector signed char q4x1 = vec_sr(qxs, v4);
  9386. q4x0 = vec_perm(values, values, (vector unsigned char)q4x0);
  9387. q4x1 = vec_perm(values, values, (vector unsigned char)q4x1);
  9388. vector signed char q8y0 = vec_xl( 0, y[ib].qs);
  9389. vector signed char q8y1 = vec_xl(16, y[ib].qs);
  9390. vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
  9391. vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
  9392. vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
  9393. vector signed int vsumi1 = vec_add(vec_unpackh(qv1), vec_unpackl(qv1));
  9394. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  9395. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  9396. }
  9397. vsumf0 = vec_add(vsumf0, vsumf1);
  9398. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  9399. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  9400. *s = vec_extract(vsumf0, 0);
  9401. #else
  9402. float sumf = 0;
  9403. for (int ib = 0; ib < nb; ++ib) {
  9404. const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d);
  9405. int sumi1 = 0, sumi2 = 0;
  9406. for (int j = 0; j < QK4_NL/2; ++j) {
  9407. sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
  9408. sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
  9409. }
  9410. sumf += d * (sumi1 + sumi2);
  9411. }
  9412. *s = sumf;
  9413. #endif
  9414. }
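// IQ4_XS dot product with q8_K: same non-linear 4-bit table as IQ4_NL, but with 6-bit
// sub-block scales split between scales_l (low 4 bits) and scales_h (2 bits per sub-block),
// offset by -32. for QK_K == 64 the layout matches IQ4_NL and the call is forwarded there.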
  9415. void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
  9416. assert(nrc == 1);
  9417. UNUSED(nrc);
  9418. UNUSED(bx);
  9419. UNUSED(by);
  9420. UNUSED(bs);
  9421. assert(n % QK_K == 0);
  9422. #if QK_K == 64
  9423. ggml_vec_dot_iq4_nl_q8_0(n, s, bs, vx, bx, vy, by, nrc);
  9424. #else
  9425. const block_iq4_xs * restrict x = vx;
  9426. const block_q8_K * restrict y = vy;
  9427. const int nb = n / QK_K;
  9428. #if defined __ARM_NEON
  9429. const int8x16_t values = vld1q_s8(kvalues_iq4nl);
  9430. const uint8x16_t m4b = vdupq_n_u8(0x0f);
  9431. ggml_uint8x16x2_t q4bits;
  9432. ggml_int8x16x4_t q4b;
  9433. ggml_int8x16x4_t q8b;
  9434. int32x4_t prod_1, prod_2;
  9435. float sumf = 0;
  9436. for (int ibl = 0; ibl < nb; ++ibl) {
  9437. const int8_t * q8 = y[ibl].qs;
  9438. const uint8_t * q4 = x[ibl].qs;
  9439. uint16_t h = x[ibl].scales_h;
  9440. int sumi1 = 0, sumi2 = 0;
  9441. for (int ib = 0; ib < QK_K/64; ++ib) {
  9442. q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
  9443. q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
  9444. q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
  9445. q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
  9446. q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
  9447. q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
  9448. prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
  9449. prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
  9450. int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32;
  9451. int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32;
  9452. h >>= 4;
  9453. sumi1 += vaddvq_s32(prod_1) * ls1;
  9454. sumi2 += vaddvq_s32(prod_2) * ls2;
  9455. }
  9456. sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2);
  9457. }
  9458. *s = sumf;
  9459. #elif defined __AVX2__
  9460. const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl);
  9461. const __m128i m4b = _mm_set1_epi8(0x0f);
  9462. __m256 accum = _mm256_setzero_ps();
  9463. for (int ibl = 0; ibl < nb; ++ibl) {
  9464. const uint8_t * qs = x[ibl].qs;
  9465. const int8_t * q8 = y[ibl].qs;
  9466. uint16_t sh = x[ibl].scales_h;
  9467. __m256i sumi1 = _mm256_setzero_si256();
  9468. __m256i sumi2 = _mm256_setzero_si256();
  9469. for (int ib = 0; ib < QK_K/32; ib += 2) {
  9470. const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16;
  9471. const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16;
  9472. const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  9473. const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
  9474. const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)),
  9475. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)));
  9476. const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)),
  9477. _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)));
  9478. const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1);
  9479. const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2);
  9480. const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32;
  9481. const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32;
  9482. sh >>= 4;
  9483. const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1));
  9484. const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2));
  9485. sumi1 = _mm256_add_epi32(p_1, sumi1);
  9486. sumi2 = _mm256_add_epi32(p_2, sumi2);
  9487. }
  9488. accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d),
  9489. _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum);
  9490. }
  9491. *s = hsum_float_8(accum);
  9492. #elif defined(__POWER9_VECTOR__)
  9493. const vector signed char lowMask = vec_splats((signed char)0xF);
  9494. const vector unsigned char v4 = vec_splats((unsigned char)0x4);
  9495. vector float vsumf0 = vec_splats(0.0f);
  9496. vector float vsumf1 = vec_splats(0.0f);
  9497. vector float vsumf2 = vec_splats(0.0f);
  9498. vector float vsumf3 = vec_splats(0.0f);
  9499. const vector signed char values = vec_xl( 0, kvalues_iq4nl);
  9500. for (int ibl = 0; ibl < nb; ++ibl) {
  9501. vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d));
  9502. vector float vyd = vec_splats(y[ibl].d);
  9503. vector float vd = vec_mul(vxd, vyd);
  9504. vector signed int vsumi0 = vec_splats((int32_t)0);
  9505. vector signed int vsumi1 = vec_splats((int32_t)0);
  9506. vector signed int vsumi2 = vec_splats((int32_t)0);
  9507. vector signed int vsumi3 = vec_splats((int32_t)0);
  9508. vector signed int vsumi4 = vec_splats((int32_t)0);
  9509. vector signed int vsumi5 = vec_splats((int32_t)0);
  9510. vector signed int vsumi6 = vec_splats((int32_t)0);
  9511. vector signed int vsumi7 = vec_splats((int32_t)0);
  9512. uint16_t h = x[ibl].scales_h;
  9513. const uint8_t * restrict q4 = x[ibl].qs;
  9514. const uint8_t * restrict sc = x[ibl].scales_l;
  9515. const int8_t * restrict q8 = y[ibl].qs;
  9516. for (int ib = 0; ib < QK_K/64; ib ++ ) {
  9517. __builtin_prefetch(q4, 0, 1);
  9518. __builtin_prefetch(q8, 0, 1);
  9519. vector signed char qxs0 = (vector signed char)vec_xl( 0, q4);
  9520. vector signed char qxs1 = (vector signed char)vec_xl(16, q4);
  9521. q4 += 32;
  9522. vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask);
  9523. vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4);
  9524. vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask);
  9525. vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4);
  9526. q4x00 = vec_perm(values, values, (vector unsigned char)q4x00);
  9527. q4x01 = vec_perm(values, values, (vector unsigned char)q4x01);
  9528. q4x10 = vec_perm(values, values, (vector unsigned char)q4x10);
  9529. q4x11 = vec_perm(values, values, (vector unsigned char)q4x11);
  9530. vector signed char q8y0 = vec_xl( 0, q8);
  9531. vector signed char q8y1 = vec_xl(16, q8);
  9532. vector signed char q8y2 = vec_xl(32, q8);
  9533. vector signed char q8y3 = vec_xl(48, q8);
  9534. q8 += 64;
  9535. vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0));
  9536. vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1));
  9537. vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2));
  9538. vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3));
  9539. const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32);
  9540. const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32);
  9541. h >>= 4;
  9542. sc ++;
  9543. vector signed short vscales01 = vec_splats((int16_t)ls0);
  9544. vector signed short vscales23 = vec_splats((int16_t)ls1);
  9545. vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0);
  9546. vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1);
  9547. vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2);
  9548. vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3);
  9549. vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4);
  9550. vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5);
  9551. vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6);
  9552. vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7);
  9553. }
  9554. vsumi0 = vec_add(vsumi0, vsumi4);
  9555. vsumi1 = vec_add(vsumi1, vsumi5);
  9556. vsumi2 = vec_add(vsumi2, vsumi6);
  9557. vsumi3 = vec_add(vsumi3, vsumi7);
  9558. vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
  9559. vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
  9560. vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
  9561. vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
  9562. }
  9563. vsumf0 = vec_add(vsumf0, vsumf2);
  9564. vsumf1 = vec_add(vsumf1, vsumf3);
  9565. vsumf0 = vec_add(vsumf0, vsumf1);
  9566. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
  9567. vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
  9568. *s = vec_extract(vsumf0, 0);
  9569. #else
  9570. float sumf = 0;
  9571. for (int ibl = 0; ibl < nb; ++ibl) {
  9572. const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
  9573. uint16_t h = x[ibl].scales_h;
  9574. const uint8_t * qs = x[ibl].qs;
  9575. const int8_t * q8 = y[ibl].qs;
  9576. for (int ib = 0; ib < QK_K/32; ib += 2) {
  9577. const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
  9578. const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
  9579. h >>= 4;
  9580. const float d1 = d4d8*(ls1 - 32);
  9581. const float d2 = d4d8*(ls2 - 32);
  9582. int sumi1 = 0, sumi2 = 0;
  9583. for (int j = 0; j < 16; ++j) {
  9584. sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
  9585. sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
  9586. }
  9587. sumf += d1 * (sumi1 + sumi2);
  9588. qs += 16;
  9589. q8 += 32;
  9590. sumi1 = sumi2 = 0;
  9591. for (int j = 0; j < 16; ++j) {
  9592. sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
  9593. sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
  9594. }
  9595. sumf += d2 * (sumi1 + sumi2);
  9596. qs += 16;
  9597. q8 += 32;
  9598. }
  9599. }
  9600. *s = sumf;
  9601. #endif
  9602. #endif
  9603. }
  9604. // ================================ IQ2 quantization =============================================
  9605. typedef struct {
  9606. uint64_t * grid;
  9607. int * map;
  9608. uint16_t * neighbours;
  9609. } iq2_entry_t;
  9610. static iq2_entry_t iq2_data[4] = {
  9611. {NULL, NULL, NULL},
  9612. {NULL, NULL, NULL},
  9613. {NULL, NULL, NULL},
  9614. {NULL, NULL, NULL},
  9615. };
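// lazily initialized lookup tables (grid points, code -> grid index map, and neighbour
// lists) shared by the IQ2/IQ1 quantization routines; the slot for a given type is chosen
// by iq2_data_index() below.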
  9616. static inline int iq2_data_index(enum ggml_type type) {
  9617. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S);
  9618. return type == GGML_TYPE_IQ2_XXS ? 0 :
  9619. type == GGML_TYPE_IQ2_XS ? 1 :
  9620. type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 2 : 3;
  9621. }
  9622. static inline int iq2_grid_size(enum ggml_type type) {
  9623. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S);
  9624. return type == GGML_TYPE_IQ2_XXS ? 256 :
  9625. type == GGML_TYPE_IQ2_XS ? 512 :
  9626. type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? NGRID_IQ1S : 1024;
  9627. }
  9628. static int iq2_compare_func(const void * left, const void * right) {
  9629. const int * l = (const int *)left;
  9630. const int * r = (const int *)right;
  9631. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  9632. }
  9633. void iq2xs_init_impl(enum ggml_type type) {
  9634. const int gindex = iq2_data_index(type);
  9635. const int grid_size = iq2_grid_size(type);
  9636. if (iq2_data[gindex].grid) {
  9637. return;
  9638. }
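// packed grid points: each 16-bit entry appears to encode one grid point as 8 coordinates
// at 2 bits each, expanded into the byte-per-coordinate tables when this init runs.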
  9639. static const uint16_t kgrid_2bit_256[256] = {
  9640. 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97,
  9641. 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642,
  9642. 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288,
  9643. 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113,
  9644. 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240,
  9645. 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400,
  9646. 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260,
  9647. 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872,
  9648. 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
  9649. 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
  9650. 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
  9651. 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
  9652. 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
  9653. 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
  9654. 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
  9655. 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
  9656. };
  9657. static const uint16_t kgrid_2bit_512[512] = {
  9658. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  9659. 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257,
  9660. 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340,
  9661. 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597,
  9662. 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096,
  9663. 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348,
  9664. 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065,
  9665. 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441,
  9666. 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160,
  9667. 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372,
  9668. 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125,
  9669. 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652,
  9670. 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197,
  9671. 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549,
  9672. 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894,
  9673. 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
  9674. 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
  9675. 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
  9676. 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
  9677. 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
  9678. 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
  9679. 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
  9680. 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
  9681. 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
  9682. 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
  9683. 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
  9684. 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
  9685. 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
  9686. 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
  9687. 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
  9688. 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
  9689. 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
  9690. };
  9691. static const uint16_t kgrid_1bit_2048[NGRID_IQ1S] = {
  9692. 0, 2, 5, 8, 10, 17, 21, 32, 34, 40, 42, 69, 81, 84, 86, 101,
  9693. 128, 130, 136, 138, 149, 160, 162, 168, 170, 260, 261, 273, 276, 278, 281, 282,
  9694. 293, 321, 326, 329, 338, 341, 346, 353, 356, 358, 360, 389, 401, 404, 406, 421,
  9695. 512, 514, 520, 522, 533, 544, 546, 552, 554, 581, 593, 601, 612, 617, 640, 642,
  9696. 648, 650, 657, 661, 665, 672, 674, 680, 682, 1041, 1044, 1046, 1061, 1089, 1097, 1109,
  9697. 1114, 1124, 1125, 1169, 1177, 1189, 1281, 1284, 1285, 1286, 1301, 1304, 1306, 1321, 1344, 1349,
  9698. 1354, 1360, 1361, 1364, 1365, 1366, 1369, 1376, 1378, 1381, 1384, 1386, 1409, 1425, 1429, 1432,
  9699. 1434, 1441, 1444, 1445, 1446, 1449, 1556, 1561, 1601, 1604, 1616, 1618, 1621, 1624, 1632, 1633,
  9700. 1638, 1641, 1669, 1681, 1684, 1689, 2048, 2050, 2056, 2058, 2069, 2080, 2082, 2088, 2090, 2117,
  9701. 2129, 2134, 2149, 2176, 2178, 2184, 2186, 2197, 2208, 2210, 2216, 2218, 2309, 2321, 2324, 2329,
  9702. 2340, 2341, 2369, 2384, 2385, 2389, 2401, 2404, 2409, 2449, 2452, 2454, 2457, 2469, 2560, 2562,
  9703. 2568, 2570, 2581, 2592, 2594, 2600, 2602, 2629, 2641, 2649, 2657, 2661, 2688, 2690, 2693, 2696,
  9704. 2698, 2709, 2720, 2722, 2728, 2730, 4112, 4113, 4116, 4121, 4132, 4133, 4161, 4164, 4176, 4181,
  9705. 4184, 4193, 4196, 4197, 4201, 4241, 4244, 4246, 4257, 4261, 4353, 4356, 4358, 4361, 4368, 4370,
  9706. 4373, 4376, 4385, 4388, 4393, 4421, 4426, 4432, 4433, 4434, 4436, 4437, 4438, 4441, 4448, 4453,
  9707. 4484, 4498, 4501, 4513, 4516, 4625, 4628, 4630, 4645, 4672, 4678, 4681, 4690, 4693, 4696, 4698,
  9708. 4708, 4710, 4741, 4753, 4756, 4758, 4773, 5121, 5126, 5129, 5140, 5141, 5144, 5145, 5153, 5158,
  9709. 5185, 5189, 5190, 5192, 5194, 5201, 5204, 5205, 5206, 5209, 5218, 5221, 5224, 5252, 5257, 5264,
  9710. 5268, 5269, 5272, 5273, 5274, 5281, 5284, 5285, 5289, 5378, 5381, 5386, 5393, 5396, 5397, 5398,
  9711. 5401, 5408, 5410, 5413, 5416, 5418, 5441, 5444, 5445, 5446, 5457, 5458, 5460, 5461, 5462, 5465,
  9712. 5466, 5473, 5476, 5477, 5478, 5481, 5504, 5506, 5508, 5509, 5512, 5514, 5520, 5521, 5524, 5525,
  9713. 5526, 5529, 5530, 5536, 5538, 5541, 5633, 5636, 5637, 5638, 5653, 5654, 5656, 5658, 5665, 5670,
  9714. 5696, 5698, 5700, 5701, 5704, 5706, 5713, 5717, 5718, 5720, 5721, 5729, 5732, 5733, 5736, 5737,
  9715. 5738, 5766, 5770, 5778, 5781, 5796, 5801, 6161, 6166, 6181, 6209, 6212, 6214, 6217, 6224, 6229,
  9716. 6232, 6234, 6240, 6241, 6244, 6246, 6249, 6277, 6289, 6292, 6309, 6416, 6418, 6421, 6426, 6433,
  9717. 6437, 6466, 6468, 6469, 6472, 6481, 6484, 6485, 6486, 6489, 6490, 6496, 6501, 6506, 6537, 6545,
  9718. 6546, 6549, 6552, 6561, 6566, 6569, 6665, 6678, 6692, 6694, 6724, 6726, 6729, 6736, 6738, 6741,
  9719. 6744, 6753, 6758, 6761, 6789, 6801, 6806, 6810, 8192, 8194, 8200, 8202, 8213, 8224, 8226, 8229,
  9720. 8232, 8234, 8261, 8273, 8281, 8289, 8293, 8320, 8322, 8328, 8330, 8341, 8352, 8354, 8357, 8360,
  9721. 8362, 8453, 8465, 8468, 8473, 8485, 8514, 8516, 8521, 8533, 8536, 8538, 8545, 8548, 8549, 8550,
  9722. 8581, 8592, 8598, 8601, 8613, 8705, 8712, 8714, 8721, 8725, 8736, 8738, 8744, 8746, 8773, 8785,
  9723. 8790, 8793, 8805, 8833, 8840, 8842, 8849, 8853, 8864, 8866, 8872, 8874, 9221, 9236, 9238, 9241,
  9724. 9253, 9284, 9285, 9286, 9289, 9298, 9301, 9304, 9306, 9318, 9349, 9361, 9364, 9369, 9377, 9381,
  9725. 9481, 9493, 9505, 9513, 9536, 9541, 9544, 9553, 9556, 9557, 9561, 9570, 9573, 9576, 9609, 9616,
  9726. 9620, 9621, 9624, 9626, 9633, 9636, 9638, 9641, 9733, 9744, 9746, 9753, 9765, 9793, 9801, 9813,
  9727. 9824, 9825, 9833, 9860, 9862, 9872, 9882, 10240, 10242, 10248, 10250, 10261, 10272, 10274, 10280, 10282,
  9728. 10309, 10321, 10324, 10341, 10368, 10370, 10376, 10378, 10400, 10402, 10408, 10410, 10505, 10513, 10516, 10521,
  9729. 10533, 10566, 10569, 10578, 10581, 10593, 10596, 10598, 10601, 10629, 10640, 10646, 10649, 10660, 10661, 10752,
  9730. 10754, 10760, 10762, 10784, 10786, 10792, 10794, 10821, 10833, 10838, 10841, 10853, 10880, 10882, 10888, 10890,
  9731. 10901, 10912, 10914, 10920, 10922, 16389, 16401, 16406, 16421, 16457, 16466, 16469, 16472, 16474, 16481, 16484,
  9732. 16486, 16532, 16537, 16545, 16550, 16640, 16641, 16644, 16646, 16649, 16658, 16661, 16662, 16664, 16666, 16673,
  9733. 16678, 16681, 16709, 16712, 16714, 16721, 16724, 16725, 16726, 16729, 16730, 16741, 16744, 16746, 16769, 16772,
  9734. 16774, 16784, 16786, 16789, 16800, 16801, 16802, 16901, 16913, 16916, 16918, 16933, 16961, 16978, 16981, 16986,
  9735. 16996, 17001, 17033, 17044, 17061, 17409, 17429, 17433, 17449, 17477, 17480, 17482, 17489, 17492, 17493, 17494,
  9736. 17505, 17506, 17509, 17512, 17514, 17537, 17542, 17545, 17552, 17554, 17557, 17568, 17569, 17577, 17665, 17666,
  9737. 17669, 17674, 17681, 17684, 17685, 17686, 17689, 17696, 17701, 17706, 17729, 17732, 17733, 17734, 17737, 17744,
  9738. 17745, 17748, 17749, 17750, 17752, 17753, 17761, 17764, 17765, 17766, 17769, 17794, 17796, 17797, 17800, 17809,
  9739. 17812, 17813, 17814, 17817, 17818, 17829, 17832, 17834, 17921, 17925, 17929, 17940, 17941, 17944, 17946, 17953,
  9740. 17956, 17961, 17984, 17986, 17989, 17992, 18000, 18001, 18002, 18005, 18006, 18009, 18018, 18021, 18024, 18049,
  9741. 18053, 18058, 18068, 18069, 18081, 18084, 18086, 18437, 18449, 18453, 18458, 18469, 18498, 18505, 18512, 18517,
  9742. 18520, 18529, 18532, 18534, 18537, 18565, 18577, 18580, 18582, 18585, 18597, 18689, 18693, 18694, 18698, 18704,
  9743. 18708, 18709, 18712, 18721, 18724, 18726, 18752, 18757, 18762, 18769, 18770, 18772, 18773, 18774, 18777, 18784,
  9744. 18786, 18789, 18790, 18794, 18822, 18825, 18834, 18837, 18838, 18840, 18849, 18852, 18854, 18857, 18966, 19012,
  9745. 19014, 19017, 19029, 19032, 19034, 19044, 19049, 19092, 19109, 20481, 20484, 20485, 20486, 20489, 20498, 20501,
  9746. 20506, 20513, 20516, 20521, 20544, 20549, 20552, 20561, 20564, 20565, 20566, 20569, 20581, 20584, 20614, 20617,
  9747. 20629, 20632, 20640, 20641, 20646, 20649, 20741, 20744, 20745, 20746, 20753, 20756, 20757, 20758, 20760, 20761,
  9748. 20768, 20773, 20774, 20776, 20778, 20801, 20804, 20805, 20806, 20809, 20816, 20817, 20818, 20820, 20821, 20822,
  9749. 20824, 20825, 20826, 20833, 20836, 20837, 20838, 20841, 20866, 20869, 20881, 20884, 20885, 20886, 20889, 20896,
  9750. 20901, 20906, 20993, 20998, 21010, 21013, 21018, 21025, 21028, 21058, 21061, 21066, 21073, 21076, 21077, 21078,
  9751. 21081, 21090, 21093, 21125, 21136, 21138, 21141, 21145, 21146, 21156, 21508, 21509, 21521, 21524, 21525, 21526,
  9752. 21528, 21529, 21537, 21541, 21544, 21546, 21569, 21572, 21573, 21574, 21577, 21578, 21584, 21585, 21588, 21589,
  9753. 21590, 21592, 21593, 21594, 21601, 21602, 21604, 21605, 21606, 21609, 21632, 21640, 21642, 21649, 21652, 21653,
  9754. 21654, 21657, 21665, 21668, 21669, 21674, 21761, 21762, 21764, 21765, 21766, 21769, 21776, 21777, 21778, 21780,
  9755. 21781, 21782, 21785, 21786, 21793, 21796, 21797, 21798, 21801, 21824, 21825, 21826, 21828, 21829, 21830, 21832,
  9756. 21833, 21840, 21841, 21842, 21844, 21845, 21846, 21848, 21849, 21850, 21856, 21857, 21860, 21861, 21862, 21864,
  9757. 21865, 21866, 21889, 21892, 21893, 21897, 21898, 21904, 21905, 21908, 21909, 21910, 21912, 21913, 21921, 21924,
  9758. 21925, 21926, 21929, 22016, 22017, 22018, 22020, 22022, 22024, 22025, 22033, 22036, 22037, 22040, 22041, 22048,
  9759. 22049, 22050, 22052, 22053, 22054, 22056, 22057, 22081, 22085, 22086, 22088, 22089, 22090, 22096, 22097, 22098,
  9760. 22100, 22101, 22102, 22104, 22105, 22106, 22113, 22116, 22117, 22121, 22146, 22149, 22150, 22152, 22153, 22154,
  9761. 22161, 22165, 22170, 22178, 22181, 22182, 22184, 22185, 22532, 22533, 22534, 22537, 22544, 22549, 22552, 22561,
  9762. 22570, 22597, 22600, 22602, 22609, 22612, 22613, 22614, 22616, 22617, 22624, 22626, 22628, 22629, 22658, 22665,
  9763. 22672, 22674, 22677, 22680, 22689, 22697, 22785, 22786, 22789, 22794, 22801, 22804, 22805, 22806, 22809, 22821,
  9764. 22849, 22852, 22853, 22854, 22857, 22864, 22865, 22866, 22868, 22869, 22870, 22872, 22873, 22874, 22881, 22884,
  9765. 22885, 22886, 22889, 22913, 22917, 22921, 22929, 22932, 22933, 22934, 22936, 22937, 22949, 23044, 23048, 23061,
  9766. 23066, 23072, 23077, 23078, 23081, 23109, 23112, 23113, 23121, 23125, 23126, 23128, 23129, 23138, 23141, 23144,
  9767. 23146, 23169, 23178, 23186, 23189, 23190, 23192, 23194, 23201, 24581, 24596, 24598, 24601, 24613, 24644, 24656,
  9768. 24661, 24662, 24664, 24666, 24673, 24676, 24678, 24681, 24705, 24726, 24741, 24833, 24836, 24838, 24841, 24850,
  9769. 24853, 24865, 24866, 24870, 24873, 24901, 24905, 24913, 24917, 24918, 24921, 24933, 24934, 24938, 24964, 24970,
  9770. 24978, 24981, 24993, 24998, 25001, 25105, 25110, 25113, 25152, 25153, 25158, 25173, 25174, 25176, 25184, 25221,
  9771. 25233, 25238, 25253, 25617, 25618, 25621, 25622, 25626, 25633, 25638, 25641, 25664, 25666, 25669, 25672, 25674,
  9772. 25681, 25684, 25685, 25686, 25689, 25690, 25696, 25698, 25701, 25732, 25733, 25737, 25744, 25746, 25748, 25749,
  9773. 25750, 25752, 25754, 25761, 25764, 25769, 25861, 25864, 25866, 25873, 25877, 25878, 25881, 25924, 25925, 25926,
  9774. 25929, 25936, 25937, 25940, 25941, 25942, 25945, 25953, 25956, 25957, 25958, 25961, 25990, 25993, 25994, 26001,
  9775. 26005, 26006, 26009, 26010, 26018, 26021, 26022, 26024, 26114, 26121, 26133, 26144, 26150, 26152, 26153, 26176,
  9776. 26181, 26184, 26186, 26193, 26196, 26197, 26198, 26200, 26202, 26208, 26213, 26216, 26240, 26242, 26245, 26250,
  9777. 26260, 26262, 26264, 26265, 26272, 26276, 26278, 26282, 26646, 26649, 26661, 26689, 26706, 26709, 26714, 26721,
  9778. 26729, 26757, 26769, 26776, 26790, 26881, 26884, 26896, 26901, 26913, 26916, 26918, 26921, 26944, 26945, 26949,
  9779. 26950, 26952, 26961, 26964, 26965, 26966, 26969, 26976, 26981, 26986, 27010, 27012, 27018, 27029, 27041, 27044,
  9780. 27045, 27049, 27153, 27158, 27160, 27201, 27204, 27209, 27216, 27221, 27224, 27226, 27236, 27237, 27241, 27270,
  9781. 27284, 27288, 27290, 27302, 32768, 32770, 32776, 32778, 32800, 32802, 32808, 32810, 32837, 32848, 32849, 32852,
  9782. 32854, 32857, 32869, 32896, 32898, 32904, 32906, 32917, 32928, 32930, 32936, 32938, 33029, 33041, 33044, 33046,
  9783. 33049, 33061, 33089, 33092, 33097, 33104, 33106, 33109, 33110, 33112, 33113, 33124, 33126, 33129, 33157, 33161,
  9784. 33172, 33174, 33177, 33189, 33280, 33282, 33288, 33290, 33301, 33312, 33314, 33320, 33322, 33361, 33364, 33369,
  9785. 33381, 33408, 33410, 33416, 33418, 33429, 33440, 33442, 33448, 33450, 33812, 33817, 33857, 33860, 33873, 33877,
  9786. 33882, 33889, 33892, 33897, 33940, 33945, 34049, 34057, 34066, 34069, 34074, 34086, 34089, 34112, 34113, 34117,
  9787. 34120, 34129, 34132, 34133, 34134, 34137, 34138, 34149, 34150, 34152, 34154, 34177, 34180, 34182, 34185, 34192,
  9788. 34194, 34197, 34200, 34214, 34321, 34326, 34329, 34341, 34369, 34372, 34377, 34378, 34384, 34389, 34393, 34394,
  9789. 34401, 34406, 34410, 34437, 34449, 34458, 34468, 34816, 34818, 34824, 34826, 34837, 34848, 34850, 34856, 34858,
  9790. 34881, 34885, 34897, 34900, 34905, 34917, 34921, 34944, 34946, 34952, 34954, 34965, 34976, 34978, 34984, 34986,
  9791. 35077, 35078, 35089, 35092, 35094, 35109, 35137, 35140, 35142, 35145, 35152, 35154, 35157, 35162, 35169, 35172,
  9792. 35205, 35222, 35225, 35237, 35328, 35330, 35336, 35338, 35349, 35360, 35362, 35368, 35370, 35397, 35409, 35412,
  9793. 35414, 35456, 35458, 35464, 35466, 35477, 35488, 35490, 35496, 35498, 36869, 36881, 36886, 36888, 36889, 36901,
  9794. 36929, 36934, 36937, 36949, 36952, 36954, 36969, 36970, 36997, 37009, 37012, 37014, 37017, 37029, 37121, 37124,
  9795. 37126, 37129, 37136, 37141, 37144, 37146, 37153, 37156, 37158, 37161, 37184, 37189, 37200, 37201, 37204, 37205,
  9796. 37206, 37209, 37218, 37221, 37252, 37254, 37266, 37269, 37272, 37281, 37284, 37286, 37289, 37381, 37393, 37396,
  9797. 37401, 37413, 37444, 37446, 37449, 37456, 37458, 37461, 37464, 37478, 37481, 37509, 37524, 37526, 37545, 37889,
  9798. 37892, 37894, 37904, 37909, 37912, 37926, 37952, 37962, 37969, 37972, 37973, 37974, 37976, 37977, 37984, 37985,
  9799. 37986, 37989, 38020, 38022, 38034, 38036, 38037, 38040, 38049, 38057, 38144, 38149, 38152, 38154, 38160, 38161,
  9800. 38164, 38165, 38166, 38169, 38177, 38181, 38185, 38186, 38209, 38212, 38213, 38214, 38217, 38224, 38225, 38226,
  9801. 38228, 38229, 38230, 38232, 38233, 38234, 38241, 38244, 38245, 38246, 38249, 38273, 38277, 38280, 38289, 38290,
  9802. 38292, 38293, 38294, 38297, 38298, 38304, 38306, 38309, 38312, 38314, 38401, 38404, 38416, 38421, 38425, 38432,
  9803. 38438, 38441, 38469, 38472, 38473, 38481, 38482, 38485, 38486, 38489, 38501, 38504, 38530, 38532, 38537, 38538,
  9804. 38546, 38548, 38549, 38564, 38566, 38569, 38917, 38934, 38937, 38949, 38977, 38982, 38992, 38994, 38997, 38998,
  9805. 39002, 39012, 39013, 39045, 39057, 39062, 39065, 39077, 39172, 39174, 39177, 39184, 39186, 39189, 39192, 39194,
  9806. 39200, 39201, 39204, 39206, 39232, 39234, 39237, 39240, 39242, 39249, 39252, 39253, 39254, 39257, 39266, 39269,
  9807. 39270, 39274, 39297, 39300, 39312, 39314, 39317, 39322, 39329, 39334, 39429, 39445, 39461, 39492, 39494, 39497,
  9808. 39504, 39509, 39512, 39521, 39557, 39569, 39572, 39573, 39574, 40960, 40962, 40968, 40970, 40981, 40992, 40994,
  9809. 41000, 41002, 41029, 41041, 41044, 41046, 41049, 41088, 41090, 41096, 41098, 41109, 41120, 41122, 41128, 41130,
  9810. 41221, 41225, 41233, 41236, 41238, 41241, 41242, 41286, 41289, 41297, 41301, 41304, 41306, 41313, 41316, 41349,
  9811. 41360, 41362, 41366, 41369, 41474, 41480, 41482, 41488, 41497, 41506, 41512, 41514, 41541, 41553, 41558, 41561,
  9812. 41573, 41600, 41602, 41608, 41610, 41621, 41632, 41634, 41640, 41642, 42009, 42021, 42049, 42052, 42064, 42068,
  9813. 42069, 42072, 42074, 42081, 42085, 42086, 42088, 42089, 42117, 42246, 42249, 42256, 42258, 42261, 42264, 42278,
  9814. 42281, 42306, 42309, 42321, 42324, 42325, 42326, 42329, 42341, 42346, 42369, 42372, 42373, 42374, 42377, 42386,
  9815. 42389, 42392, 42501, 42513, 42518, 42522, 42529, 42533, 42564, 42566, 42570, 42578, 42581, 42582, 42584, 42592,
  9816. 42594, 42630, 42640, 42645, 42646, 42649, 42657, 42660, 42662, 43008, 43010, 43016, 43018, 43040, 43042, 43048,
  9817. 43050, 43089, 43092, 43094, 43097, 43136, 43138, 43144, 43146, 43157, 43168, 43170, 43176, 43178, 43269, 43284,
  9818. 43289, 43297, 43301, 43329, 43344, 43349, 43354, 43361, 43366, 43369, 43408, 43414, 43520, 43522, 43528, 43530,
  9819. 43552, 43554, 43560, 43562, 43601, 43604, 43606, 43648, 43650, 43656, 43658, 43669, 43680, 43682, 43688, 43690,
  9820. };
  9821. static const uint16_t kgrid_2bit_1024[1024] = {
  9822. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  9823. 73, 80, 82, 85, 88, 97, 100, 102, 105, 128, 130, 133, 136, 145, 148, 160,
  9824. 165, 170, 257, 260, 262, 265, 272, 274, 277, 280, 289, 292, 320, 322, 325, 328,
  9825. 337, 340, 342, 345, 352, 357, 360, 385, 388, 400, 402, 405, 417, 420, 512, 514,
  9826. 517, 520, 529, 532, 544, 554, 577, 580, 582, 585, 592, 597, 640, 645, 650, 660,
  9827. 674, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1062, 1065, 1088, 1090, 1093,
  9828. 1096, 1098, 1105, 1108, 1110, 1113, 1120, 1122, 1125, 1153, 1156, 1158, 1161, 1168, 1173, 1176,
  9829. 1185, 1188, 1280, 1282, 1285, 1288, 1290, 1297, 1300, 1302, 1305, 1312, 1317, 1320, 1345, 1348,
  9830. 1350, 1353, 1360, 1362, 1365, 1368, 1377, 1380, 1408, 1410, 1413, 1416, 1425, 1428, 1440, 1537,
  9831. 1540, 1542, 1545, 1552, 1557, 1600, 1605, 1608, 1617, 1620, 1632, 1665, 1668, 1680, 2048, 2050,
  9832. 2053, 2056, 2065, 2068, 2070, 2073, 2080, 2085, 2090, 2113, 2116, 2118, 2121, 2128, 2130, 2133,
  9833. 2136, 2145, 2148, 2176, 2181, 2196, 2218, 2305, 2308, 2320, 2322, 2325, 2328, 2337, 2368, 2373,
  9834. 2376, 2385, 2388, 2400, 2433, 2448, 2560, 2577, 2580, 2594, 2600, 2602, 2640, 2713, 4097, 4100,
  9835. 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4134, 4160, 4162, 4165, 4168, 4177, 4180, 4182,
  9836. 4185, 4192, 4194, 4197, 4200, 4225, 4228, 4230, 4240, 4245, 4248, 4257, 4260, 4352, 4354, 4357,
  9837. 4360, 4362, 4369, 4372, 4374, 4377, 4384, 4386, 4389, 4392, 4417, 4420, 4422, 4425, 4432, 4434,
  9838. 4437, 4440, 4449, 4452, 4480, 4482, 4485, 4488, 4497, 4500, 4609, 4612, 4617, 4624, 4629, 4641,
  9839. 4644, 4672, 4677, 4689, 4692, 4737, 4740, 4752, 5120, 5122, 5125, 5128, 5137, 5140, 5142, 5145,
  9840. 5152, 5157, 5160, 5185, 5188, 5190, 5193, 5200, 5202, 5205, 5208, 5217, 5220, 5248, 5250, 5253,
  9841. 5256, 5265, 5268, 5280, 5377, 5380, 5382, 5385, 5392, 5394, 5397, 5400, 5409, 5412, 5440, 5442,
  9842. 5445, 5448, 5457, 5460, 5472, 5505, 5508, 5520, 5632, 5637, 5640, 5649, 5652, 5664, 5697, 5700,
  9843. 5712, 5760, 5802, 6145, 6148, 6150, 6153, 6160, 6165, 6168, 6177, 6208, 6210, 6213, 6216, 6225,
  9844. 6228, 6240, 6273, 6276, 6400, 6402, 6405, 6408, 6417, 6420, 6432, 6465, 6468, 6480, 6505, 6562,
  9845. 6660, 6672, 6720, 6742, 8192, 8194, 8197, 8200, 8209, 8212, 8214, 8217, 8224, 8229, 8234, 8257,
  9846. 8260, 8272, 8274, 8277, 8292, 8320, 8330, 8340, 8362, 8449, 8452, 8464, 8466, 8469, 8481, 8512,
  9847. 8514, 8517, 8529, 8532, 8544, 8577, 8580, 8592, 8704, 8714, 8738, 8744, 8746, 8772, 8784, 8840,
  9848. 8842, 8872, 9217, 9220, 9222, 9225, 9232, 9237, 9240, 9249, 9252, 9280, 9282, 9285, 9288, 9297,
  9849. 9300, 9312, 9345, 9348, 9360, 9472, 9477, 9480, 9489, 9492, 9504, 9537, 9540, 9552, 9574, 9600,
  9850. 9729, 9732, 9744, 9792, 9817, 10240, 10245, 10257, 10260, 10305, 10308, 10320, 10378, 10410, 10497, 10500,
  9851. 10512, 10645, 10762, 10786, 10852, 10888, 10890, 16385, 16388, 16390, 16393, 16400, 16402, 16405, 16408, 16410,
  9852. 16417, 16420, 16422, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16470, 16473, 16480, 16482, 16485, 16513,
  9853. 16516, 16528, 16533, 16536, 16545, 16548, 16640, 16642, 16645, 16648, 16657, 16660, 16662, 16665, 16672, 16674,
  9854. 16677, 16705, 16708, 16710, 16713, 16720, 16722, 16725, 16728, 16737, 16740, 16768, 16770, 16773, 16776, 16785,
  9855. 16788, 16800, 16897, 16900, 16912, 16914, 16917, 16920, 16932, 16960, 16965, 16968, 16977, 16980, 16992, 17025,
  9856. 17028, 17408, 17410, 17413, 17416, 17418, 17425, 17428, 17430, 17433, 17440, 17442, 17445, 17448, 17473, 17476,
  9857. 17478, 17481, 17488, 17490, 17493, 17496, 17505, 17508, 17536, 17538, 17541, 17544, 17553, 17556, 17568, 17665,
  9858. 17668, 17670, 17673, 17680, 17682, 17685, 17688, 17697, 17700, 17728, 17730, 17733, 17736, 17745, 17748, 17760,
  9859. 17770, 17793, 17796, 17808, 17920, 17922, 17925, 17928, 17937, 17940, 17952, 17985, 17988, 18000, 18048, 18085,
  9860. 18433, 18436, 18441, 18448, 18450, 18453, 18456, 18465, 18468, 18496, 18498, 18501, 18504, 18513, 18516, 18528,
  9861. 18564, 18576, 18688, 18690, 18693, 18696, 18705, 18708, 18720, 18753, 18756, 18768, 18816, 18838, 18945, 18948,
  9862. 18960, 19008, 20480, 20482, 20485, 20488, 20497, 20500, 20502, 20505, 20512, 20514, 20517, 20520, 20545, 20548,
  9863. 20550, 20553, 20560, 20562, 20565, 20568, 20577, 20580, 20608, 20610, 20613, 20616, 20625, 20628, 20737, 20740,
  9864. 20742, 20745, 20752, 20754, 20757, 20760, 20769, 20772, 20800, 20802, 20805, 20808, 20817, 20820, 20832, 20865,
  9865. 20868, 20880, 20992, 20997, 21000, 21009, 21012, 21024, 21057, 21060, 21072, 21097, 21120, 21505, 21508, 21510,
  9866. 21513, 21520, 21522, 21525, 21528, 21537, 21540, 21568, 21570, 21573, 21576, 21585, 21588, 21600, 21633, 21636,
  9867. 21648, 21760, 21762, 21765, 21768, 21777, 21780, 21792, 21825, 21828, 21840, 21888, 22017, 22020, 22032, 22054,
  9868. 22080, 22528, 22530, 22533, 22536, 22545, 22548, 22560, 22593, 22596, 22608, 22618, 22656, 22785, 22788, 22800,
  9869. 22848, 23040, 23065, 23173, 23208, 24577, 24580, 24582, 24592, 24594, 24597, 24600, 24609, 24612, 24640, 24645,
  9870. 24648, 24657, 24660, 24672, 24708, 24720, 24832, 24834, 24837, 24840, 24849, 24852, 24864, 24897, 24900, 24912,
  9871. 24960, 24985, 25092, 25104, 25152, 25174, 25249, 25600, 25605, 25608, 25617, 25620, 25632, 25665, 25668, 25680,
  9872. 25728, 25857, 25860, 25872, 25920, 25930, 25960, 26002, 26112, 26260, 26625, 26628, 26640, 26725, 26776, 26880,
  9873. 26922, 27202, 27297, 32768, 32770, 32773, 32776, 32785, 32788, 32793, 32800, 32805, 32833, 32836, 32848, 32850,
  9874. 32853, 32856, 32865, 32896, 32901, 32913, 32916, 33025, 33028, 33033, 33040, 33042, 33045, 33048, 33057, 33060,
  9875. 33088, 33090, 33093, 33096, 33105, 33108, 33153, 33156, 33168, 33193, 33280, 33285, 33290, 33297, 33300, 33345,
  9876. 33348, 33360, 33793, 33796, 33798, 33801, 33808, 33810, 33813, 33816, 33825, 33856, 33858, 33861, 33864, 33873,
  9877. 33876, 33888, 33921, 33924, 33936, 34048, 34050, 34053, 34056, 34065, 34068, 34080, 34113, 34116, 34128, 34176,
  9878. 34186, 34305, 34308, 34320, 34345, 34368, 34816, 34821, 34833, 34836, 34881, 34884, 34896, 34978, 35073, 35076,
  9879. 35136, 35173, 35362, 35416, 35418, 35458, 35490, 36865, 36868, 36873, 36880, 36882, 36885, 36888, 36900, 36928,
  9880. 36930, 36933, 36936, 36945, 36948, 36960, 36993, 36996, 37008, 37120, 37125, 37137, 37140, 37185, 37188, 37200,
  9881. 37210, 37377, 37380, 37392, 37440, 37542, 37888, 37890, 37893, 37896, 37905, 37908, 37920, 37953, 37956, 37968,
  9882. 38016, 38038, 38145, 38148, 38160, 38208, 38296, 38305, 38400, 38470, 38500, 38913, 38916, 38928, 38950, 38976,
  9883. 39081, 39168, 39241, 39250, 39568, 40960, 40965, 40970, 40980, 40994, 41002, 41025, 41028, 41040, 41122, 41130,
  9884. 41280, 41317, 41474, 41482, 41506, 41512, 41514, 41602, 41608, 41610, 41640, 41985, 41988, 42000, 42048, 42121,
  9885. 42148, 42240, 42265, 42577, 43018, 43048, 43170, 43348, 43398, 43528, 43530, 43552, 43554, 43560, 43656, 43690,
  9886. };
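// Each 16-bit entry of the 2-bit grids above packs eight 2-bit levels (bits 2*i..2*i+1 for lane i);
// decoding maps level l to the odd coordinate 2*l + 1, so grid points live on {1,3,5,7}^8.
// kmap_size is just large enough to index every packed code the quantizers can produce
// (per-lane levels are clamped to 0..2, so the largest code is 0xAAAA = 43690).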
  9887. const int kmap_size = 43692;
  9888. //const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2;
  9889. const int nwant = type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 3 : type == GGML_TYPE_IQ2_S ? 1 : 2;
  9890. const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 :
  9891. type == GGML_TYPE_IQ2_XS ? kgrid_2bit_512 :
  9892. type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? kgrid_1bit_2048 : kgrid_2bit_1024;
  9893. uint64_t * kgrid_q2xs;
  9894. int * kmap_q2xs;
  9895. uint16_t * kneighbors_q2xs;
  9896. //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
  9897. uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
  9898. for (int k = 0; k < grid_size; ++k) {
  9899. int8_t * pos = (int8_t *)(the_grid + k);
  9900. for (int i = 0; i < 8; ++i) {
  9901. int l = (kgrid[k] >> 2*i) & 0x3;
  9902. pos[i] = 2*l + 1;
  9903. }
  9904. }
  9905. kgrid_q2xs = the_grid;
  9906. iq2_data[gindex].grid = the_grid;
  9907. kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
  9908. iq2_data[gindex].map = kmap_q2xs;
  9909. for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
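// Build the reverse map: for every point on the grid, re-pack its eight decoded
// coordinates back into the 16-bit code and record the grid position there.
// Codes that correspond to no grid point keep the value -1 and are handled below.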
  9910. uint64_t aux64;
  9911. uint8_t * aux8 = (uint8_t *)&aux64;
  9912. for (int i = 0; i < grid_size; ++i) {
  9913. aux64 = kgrid_q2xs[i];
  9914. uint16_t index = 0;
  9915. for (int k=0; k<8; ++k) {
  9916. uint16_t q = (aux8[k] - 1)/2;
  9917. index |= (q << 2*k);
  9918. }
  9919. kmap_q2xs[index] = i;
  9920. }
  9921. int8_t pos[8];
  9922. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  9923. int num_neighbors = 0, num_not_in_map = 0;
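// First pass: for every code that is not on the grid, decode it to an 8-d point, sort all
// grid points by squared distance to it, and count how many fall within the nwant closest
// distinct distances. This sizes the flat neighbour buffer allocated below.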
  9924. for (int i = 0; i < kmap_size; ++i) {
  9925. if (kmap_q2xs[i] >= 0) continue;
  9926. ++num_not_in_map;
  9927. for (int k = 0; k < 8; ++k) {
  9928. int l = (i >> 2*k) & 0x3;
  9929. pos[k] = 2*l + 1;
  9930. }
  9931. for (int j = 0; j < grid_size; ++j) {
  9932. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  9933. int d2 = 0;
  9934. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  9935. dist2[2*j+0] = d2;
  9936. dist2[2*j+1] = j;
  9937. }
  9938. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  9939. int n = 0; int d2 = dist2[0];
  9940. int nhave = 1;
  9941. for (int j = 0; j < grid_size; ++j) {
  9942. if (dist2[2*j] > d2) {
  9943. if (nhave == nwant) break;
  9944. d2 = dist2[2*j];
  9945. ++nhave;
  9946. }
  9947. ++n;
  9948. }
  9949. num_neighbors += n;
  9950. }
  9951. //printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  9952. kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  9953. iq2_data[gindex].neighbours = kneighbors_q2xs;
  9954. int counter = 0;
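// Second pass: repeat the distance sort and store the result. Each off-grid code gets
// kmap_q2xs[i] = -(counter + 1), where counter is the offset of its list inside
// kneighbors_q2xs; the list is laid out as [count, grid_index_0, grid_index_1, ...].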
  9955. for (int i = 0; i < kmap_size; ++i) {
  9956. if (kmap_q2xs[i] >= 0) continue;
  9957. for (int k = 0; k < 8; ++k) {
  9958. int l = (i >> 2*k) & 0x3;
  9959. pos[k] = 2*l + 1;
  9960. }
  9961. for (int j = 0; j < grid_size; ++j) {
  9962. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  9963. int d2 = 0;
  9964. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  9965. dist2[2*j+0] = d2;
  9966. dist2[2*j+1] = j;
  9967. }
  9968. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  9969. kmap_q2xs[i] = -(counter + 1);
  9970. int d2 = dist2[0];
  9971. uint16_t * start = &kneighbors_q2xs[counter++];
  9972. int n = 0, nhave = 1;
  9973. for (int j = 0; j < grid_size; ++j) {
  9974. if (dist2[2*j] > d2) {
  9975. if (nhave == nwant) break;
  9976. d2 = dist2[2*j];
  9977. ++nhave;
  9978. }
  9979. kneighbors_q2xs[counter++] = dist2[2*j+1];
  9980. ++n;
  9981. }
  9982. *start = n;
  9983. }
  9984. free(dist2);
  9985. }
  9986. void iq2xs_free_impl(enum ggml_type type) {
  9987. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S);
  9988. const int gindex = iq2_data_index(type);
  9989. if (iq2_data[gindex].grid) {
  9990. free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL;
  9991. free(iq2_data[gindex].map); iq2_data[gindex].map = NULL;
  9992. free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
  9993. }
  9994. }
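// Scan the neighbour list of an off-grid code (neighbours[0] holds the count) and pick the
// grid point that minimizes the weighted squared error between scale*grid and xval.
// The chosen point's 2-bit levels are written to L and its grid index is returned.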
  9995. static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  9996. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  9997. int num_neighbors = neighbours[0];
  9998. GGML_ASSERT(num_neighbors > 0);
  9999. float best_d2 = FLT_MAX;
  10000. int grid_index = -1;
  10001. for (int j = 1; j <= num_neighbors; ++j) {
  10002. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  10003. float d2 = 0;
  10004. for (int i = 0; i < 8; ++i) {
  10005. float q = pg[i];
  10006. float diff = scale*q - xval[i];
  10007. d2 += weight[i]*diff*diff;
  10008. }
  10009. if (d2 < best_d2) {
  10010. best_d2 = d2; grid_index = neighbours[j];
  10011. }
  10012. }
  10013. GGML_ASSERT(grid_index >= 0);
  10014. const int8_t * pg = (const int8_t *)(grid + grid_index);
  10015. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  10016. return grid_index;
  10017. }
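// Quantize one row to IQ2_XXS. Works on super-blocks of QK_K values split into 32-value
// sub-blocks: importance weights are qw[i]*sqrtf(sigma2 + x[i]^2) with sigma2 the mean
// squared value of the super-block, signs are folded out per group of 8, a shared sub-block
// scale is found by a small search, each group of 8 is snapped to the 256-entry grid
// (falling back to its nearest-neighbour list when off-grid), and the result is packed as
// grid indices + sign masks + 4-bit sub-block scales under the fp16 super-block scale d.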
  10018. static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights) {
  10019. const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS);
  10020. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  10021. const int * kmap_q2xs = iq2_data[gindex].map;
  10022. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  10023. GGML_ASSERT(quant_weights && "missing quantization weights");
  10024. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  10025. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  10026. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  10027. GGML_ASSERT(n%QK_K == 0);
  10028. const int kMaxQ = 3;
  10029. const int64_t nbl = n/QK_K;
  10030. block_iq2_xxs * y = vy;
  10031. float scales[QK_K/32];
  10032. float weight[32];
  10033. float xval[32];
  10034. int8_t L[32];
  10035. int8_t Laux[32];
  10036. float waux[32];
  10037. uint8_t block_signs[4];
  10038. uint32_t q2[2*(QK_K/32)];
  10039. for (int ibl = 0; ibl < nbl; ++ibl) {
  10040. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  10041. memset(q2, 0, QK_K/4);
  10042. float max_scale = 0;
  10043. const float * xbl = x + QK_K*ibl;
  10044. float sumx2 = 0;
  10045. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  10046. float sigma2 = sumx2/QK_K;
  10047. for (int ib = 0; ib < QK_K/32; ++ib) {
  10048. const float * xb = xbl + 32*ib;
  10049. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  10050. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  10051. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
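// Take absolute values per group of 8 and record which elements were negative. Only 7 sign
// bits per group are stored (s & 127), so when an odd number of signs was flipped the
// element with the smallest weighted contribution is flipped back to make the parity even;
// the 8th sign can then be reconstructed from the other seven at dequantization time.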
  10052. for (int k = 0; k < 4; ++k) {
  10053. int nflip = 0;
  10054. uint8_t s = 0;
  10055. for (int i = 0; i < 8; ++i) {
  10056. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  10057. else {
  10058. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  10059. }
  10060. }
  10061. if (nflip%2) {
  10062. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  10063. for (int i = 1; i < 8; ++i) {
  10064. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  10065. if (ax < min) {
  10066. min = ax; imin = i;
  10067. }
  10068. }
  10069. xval[8*k+imin] = -xval[8*k+imin];
  10070. s ^= (1 << imin);
  10071. }
  10072. block_signs[k] = s & 127;
  10073. }
  10074. float max = xval[0];
  10075. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  10076. if (max < GROUP_MAX_EPS) {
  10077. scales[ib] = 0;
  10078. memset(L, 0, 32);
  10079. continue;
  10080. }
  10081. float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight);
  10082. float eff_max = scale*kMaxQ;
  10083. float best = 0;
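// Refine the scale: try a small range of candidate inverse scales around the initial
// estimate, quantize every group of 8 to levels 0..kMaxQ-1, replace off-grid codes with
// their best neighbour, and keep the scale that maximizes sumqx^2/sumq2, i.e. the weighted
// least-squares fit of scale*(2*L+1) to the sign-folded values.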
  10084. for (int is = -6; is <= 6; ++is) {
  10085. float id = (2*kMaxQ-1+is*0.1f)/eff_max;
  10086. float this_scale = 1/id;
  10087. for (int k = 0; k < 4; ++k) {
  10088. for (int i = 0; i < 8; ++i) {
  10089. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  10090. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  10091. }
  10092. uint16_t u = 0;
  10093. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  10094. int grid_index = kmap_q2xs[u];
  10095. if (grid_index < 0) {
  10096. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  10097. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  10098. }
  10099. }
  10100. float sumqx = 0, sumq2 = 0;
  10101. for (int i = 0; i < 32; ++i) {
  10102. float w = weight[i];
  10103. float q = 2*Laux[i] + 1;
  10104. sumqx += w*xval[i]*q;
  10105. sumq2 += w*q*q;
  10106. }
  10107. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  10108. scale = sumqx/sumq2; best = scale*sumqx;
  10109. memcpy(L, Laux, 32);
  10110. }
  10111. }
  10112. if (scale > 0) {
  10113. float id = 1/scale;
  10114. for (int k = 0; k < 4; ++k) {
  10115. uint16_t u = 0;
  10116. for (int i = 0; i < 8; ++i) {
  10117. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  10118. l = MAX(0, MIN(kMaxQ-1, l));
  10119. u |= (l << 2*i);
  10120. }
  10121. int grid_index = kmap_q2xs[u];
  10122. if (grid_index < 0) {
  10123. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  10124. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  10125. }
  10126. const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
  10127. for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
  10128. }
  10129. float sumqx = 0, sumq2 = 0;
  10130. for (int i = 0; i < 32; ++i) {
  10131. float w = weight[i];
  10132. float q = 2*L[i] + 1;
  10133. sumqx += w*xval[i]*q;
  10134. sumq2 += w*q*q;
  10135. }
  10136. if (sumq2 > 0) scale = sumqx/sumq2;
  10137. }
  10138. if (scale < 0) {
  10139. // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
  10140. // and correspondingly flip quant signs.
  10141. scale = -scale;
  10142. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  10143. }
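// Pack the sub-block: q2[2*ib+0] holds the four 8-bit grid indices, q2[2*ib+1] holds the
// four 7-bit sign masks in bits 0..27; bits 28..31 receive the 4-bit sub-block scale below.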
  10144. for (int k = 0; k < 4; ++k) {
  10145. uint16_t u = 0;
  10146. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  10147. int grid_index = kmap_q2xs[u];
  10148. if (grid_index < 0) {
  10149. printf("Oops: found point %u not on grid:", u);
  10150. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  10151. printf("\n");
  10152. GGML_ASSERT(false);
  10153. }
  10154. q2[2*ib+0] |= (grid_index << 8*k);
  10155. q2[2*ib+1] |= (block_signs[k] << 7*k);
  10156. }
  10157. GGML_ASSERT(scale >= 0);
  10158. scales[ib] = scale;
  10159. max_scale = MAX(max_scale, scale);
  10160. }
  10161. if (!max_scale) {
  10162. memset(y[ibl].qs, 0, QK_K/4);
  10163. continue;
  10164. }
  10165. float d = max_scale/31;
  10166. y[ibl].d = GGML_FP32_TO_FP16(d);
  10167. float id = 1/d;
  10168. for (int ib = 0; ib < QK_K/32; ++ib) {
  10169. int l = nearest_int(0.5f*(id*scales[ib]-1));
  10170. l = MAX(0, MIN(15, l));
  10171. q2[2*ib+1] |= ((uint32_t)l << 28);
  10172. }
  10173. memcpy(y[ibl].qs, q2, QK_K/4);
  10174. }
  10175. }
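// Quantize one row to IQ2_XS. Same pipeline as IQ2_XXS but with 16-value sub-blocks and the
// 512-entry grid: each group of 8 becomes a uint16 holding a 9-bit grid index plus a 7-bit
// sign mask (grid_index | signs << 9), and the 4-bit sub-block scales are stored as nibbles
// in y[ibl].scales instead of inside the index words.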
  10176. static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights) {
  10177. const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS);
  10178. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  10179. const int * kmap_q2xs = iq2_data[gindex].map;
  10180. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  10181. GGML_ASSERT(quant_weights && "missing quantization weights");
  10182. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  10183. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  10184. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  10185. GGML_ASSERT(n%QK_K == 0);
  10186. const int kMaxQ = 3;
  10187. const int64_t nbl = n/QK_K;
  10188. block_iq2_xs * y = vy;
  10189. float scales[QK_K/16];
  10190. float weight[16];
  10191. float xval[16];
  10192. int8_t L[16];
  10193. int8_t Laux[16];
  10194. float waux[16];
  10195. bool is_on_grid[2];
  10196. bool is_on_grid_aux[2];
  10197. uint8_t block_signs[2];
  10198. uint16_t q2[2*(QK_K/16)];
  10199. for (int ibl = 0; ibl < nbl; ++ibl) {
  10200. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  10201. memset(q2, 0, QK_K/4);
  10202. memset(y[ibl].scales, 0, QK_K/32);
  10203. float max_scale = 0;
  10204. const float * xbl = x + QK_K*ibl;
  10205. float sumx2 = 0;
  10206. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  10207. float sigma2 = sumx2/QK_K;
  10208. for (int ib = 0; ib < QK_K/16; ++ib) {
  10209. const float * xb = xbl + 16*ib;
  10210. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  10211. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  10212. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  10213. for (int k = 0; k < 2; ++k) {
  10214. int nflip = 0;
  10215. uint8_t s = 0;
  10216. for (int i = 0; i < 8; ++i) {
  10217. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  10218. else {
  10219. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  10220. }
  10221. }
  10222. if (nflip%2) {
  10223. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  10224. for (int i = 1; i < 8; ++i) {
  10225. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  10226. if (ax < min) {
  10227. min = ax; imin = i;
  10228. }
  10229. }
  10230. xval[8*k+imin] = -xval[8*k+imin];
  10231. s ^= (1 << imin);
  10232. }
  10233. block_signs[k] = s & 127;
  10234. }
  10235. float max = xval[0];
  10236. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  10237. if (max < GROUP_MAX_EPS) {
  10238. scales[ib] = 0;
  10239. memset(L, 0, 16);
  10240. continue;
  10241. }
  10242. float best = 0;
  10243. float scale = max/(2*kMaxQ-1);
  10244. is_on_grid[0] = is_on_grid[1] = true;
  10245. for (int is = -9; is <= 9; ++is) {
  10246. float id = (2*kMaxQ-1+is*0.1f)/max;
  10247. float this_scale = 1/id;
  10248. for (int k = 0; k < 2; ++k) {
  10249. for (int i = 0; i < 8; ++i) {
  10250. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  10251. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  10252. }
  10253. uint16_t u = 0;
  10254. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  10255. int grid_index = kmap_q2xs[u];
  10256. is_on_grid_aux[k] = true;
  10257. if (grid_index < 0) {
  10258. is_on_grid_aux[k] = false;
  10259. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  10260. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  10261. }
  10262. }
  10263. float sumqx = 0, sumq2 = 0;
  10264. for (int i = 0; i < 16; ++i) {
  10265. float w = weight[i];
  10266. float q = 2*Laux[i] + 1;
  10267. sumqx += w*xval[i]*q;
  10268. sumq2 += w*q*q;
  10269. }
  10270. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  10271. scale = sumqx/sumq2; best = scale*sumqx;
  10272. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  10273. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  10274. }
  10275. }
  10276. int n_not_ongrid = 0;
  10277. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  10278. if (n_not_ongrid > 0 && scale > 0) {
  10279. float id = 1/scale;
  10280. for (int k = 0; k < 2; ++k) {
  10281. if (is_on_grid[k]) continue;
  10282. uint16_t u = 0;
  10283. for (int i = 0; i < 8; ++i) {
  10284. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  10285. l = MAX(0, MIN(kMaxQ-1, l));
  10286. u |= (l << 2*i);
  10287. L[8*k + i] = l;
  10288. }
  10289. int grid_index = kmap_q2xs[u];
  10290. if (grid_index < 0) {
  10291. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  10292. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  10293. }
  10294. }
  10295. float sumqx = 0, sumq2 = 0;
  10296. for (int i = 0; i < 16; ++i) {
  10297. float w = weight[i];
  10298. float q = 2*L[i] + 1;
  10299. sumqx += w*xval[i]*q;
  10300. sumq2 += w*q*q;
  10301. }
  10302. if (sumq2 > 0) scale = sumqx/sumq2;
  10303. }
  10304. if (scale < 0) {
  10305. scale = -scale;
  10306. for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
  10307. }
  10308. for (int k = 0; k < 2; ++k) {
  10309. uint16_t u = 0;
  10310. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  10311. int grid_index = kmap_q2xs[u];
  10312. if (grid_index < 0) {
  10313. printf("Oops: found point %u not on grid:", u);
  10314. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  10315. printf("\n");
  10316. GGML_ASSERT(false);
  10317. }
  10318. q2[2*ib+k] = grid_index | (block_signs[k] << 9);
  10319. }
  10320. GGML_ASSERT(scale >= 0);
  10321. scales[ib] = scale;
  10322. max_scale = MAX(max_scale, scale);
  10323. }
  10324. if (!max_scale) {
  10325. memset(y[ibl].qs, 0, QK_K/4);
  10326. continue;
  10327. }
  10328. float d = max_scale/31;
  10329. y[ibl].d = GGML_FP32_TO_FP16(d);
  10330. float id = 1/d;
  10331. for (int ib = 0; ib < QK_K/16; ++ib) {
  10332. int l = nearest_int(0.5f*(id*scales[ib]-1));
  10333. l = MAX(0, MIN(15, l));
  10334. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  10335. else y[ibl].scales[ib/2] |= (l << 4);
  10336. }
  10337. memcpy(y[ibl].qs, q2, QK_K/4);
  10338. }
  10339. }
  10340. size_t quantize_iq2_xxs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  10341. GGML_ASSERT(n_per_row%QK_K == 0);
  10342. int64_t nblock = n_per_row/QK_K;
  10343. char * qrow = (char *)dst;
  10344. for (int64_t row = 0; row < nrow; ++row) {
  10345. quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
  10346. src += n_per_row;
  10347. qrow += nblock*sizeof(block_iq2_xxs);
  10348. }
  10349. return nrow * nblock * sizeof(block_iq2_xxs);
  10350. }
  10351. size_t quantize_iq2_xs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  10352. GGML_ASSERT(n_per_row%QK_K == 0);
  10353. int64_t nblock = n_per_row/QK_K;
  10354. char * qrow = (char *)dst;
  10355. for (int64_t row = 0; row < nrow; ++row) {
  10356. quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
  10357. src += n_per_row;
  10358. qrow += nblock*sizeof(block_iq2_xs);
  10359. }
  10360. return nrow * nblock * sizeof(block_iq2_xs);
  10361. }
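// A minimal usage sketch for the two row quantizers above, assuming the public ggml API
// (ggml_quantize_init/ggml_quantize_free declared in ggml.h); illustrative only:
//
//     ggml_quantize_init(GGML_TYPE_IQ2_XXS);                              // builds grid/map/neighbour tables
//     size_t bytes = quantize_iq2_xxs(src, dst, nrow, n_per_row, imatrix);
//     ggml_quantize_free();                                               // releases the tables
//
// imatrix must supply n_per_row importance weights (the impls above assert
// quant_weights != NULL) and dst must hold nrow*(n_per_row/QK_K)*sizeof(block_iq2_xxs) bytes.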
  10362. //
  10363. // ============================================= 3-bit using D4 lattice
  10364. //
  10365. typedef struct {
  10366. uint32_t * grid;
  10367. int * map;
  10368. uint16_t * neighbours;
  10369. } iq3_entry_t;
  10370. static iq3_entry_t iq3_data[2] = {
  10371. {NULL, NULL, NULL},
  10372. {NULL, NULL, NULL},
  10373. };
  10374. static inline int iq3_data_index(int grid_size) {
  10375. (void)grid_size;
  10376. GGML_ASSERT(grid_size == 256 || grid_size == 512);
  10377. return grid_size == 256 ? 0 : 1;
  10378. }
  10379. static int iq3_compare_func(const void * left, const void * right) {
  10380. const int * l = (const int *)left;
  10381. const int * r = (const int *)right;
  10382. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  10383. }
  10384. void iq3xs_init_impl(int grid_size) {
  10385. const int gindex = iq3_data_index(grid_size);
  10386. if (iq3_data[gindex].grid) {
  10387. return;
  10388. }
  10389. static const uint16_t kgrid_256[256] = {
  10390. 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74,
  10391. 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159,
  10392. 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321,
  10393. 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531,
  10394. 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664,
  10395. 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978,
  10396. 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105,
  10397. 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228,
  10398. 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553,
  10399. 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722,
  10400. 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063,
  10401. 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389,
  10402. 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746,
  10403. 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153,
  10404. 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610,
  10405. 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992,
  10406. };
  10407. static const uint16_t kgrid_512[512] = {
  10408. 0, 1, 2, 5, 7, 8, 9, 10, 12, 14, 16, 17, 21, 27, 32, 34,
  10409. 37, 39, 41, 43, 48, 50, 57, 60, 63, 64, 65, 66, 68, 72, 73, 77,
  10410. 80, 83, 87, 89, 93, 100, 113, 117, 122, 128, 129, 133, 135, 136, 139, 142,
  10411. 145, 149, 152, 156, 162, 165, 167, 169, 171, 184, 187, 195, 201, 205, 208, 210,
  10412. 217, 219, 222, 228, 232, 234, 247, 249, 253, 256, 267, 271, 273, 276, 282, 288,
  10413. 291, 297, 312, 322, 324, 336, 338, 342, 347, 353, 357, 359, 374, 379, 390, 393,
  10414. 395, 409, 426, 441, 448, 450, 452, 464, 466, 470, 475, 488, 492, 512, 513, 514,
  10415. 516, 520, 521, 523, 525, 527, 528, 530, 537, 540, 542, 556, 558, 561, 570, 576,
  10416. 577, 579, 582, 584, 588, 593, 600, 603, 609, 616, 618, 632, 638, 640, 650, 653,
  10417. 655, 656, 660, 666, 672, 675, 685, 688, 698, 705, 708, 711, 712, 715, 721, 727,
  10418. 728, 732, 737, 754, 760, 771, 773, 778, 780, 793, 795, 802, 806, 808, 812, 833,
  10419. 840, 843, 849, 856, 858, 873, 912, 916, 919, 932, 934, 961, 963, 968, 970, 977,
  10420. 989, 993, 1010, 1016, 1024, 1025, 1027, 1029, 1031, 1032, 1034, 1036, 1038, 1041, 1043, 1047,
  10421. 1048, 1050, 1057, 1059, 1061, 1064, 1066, 1079, 1080, 1083, 1085, 1088, 1090, 1096, 1099, 1103,
  10422. 1106, 1109, 1113, 1116, 1122, 1129, 1153, 1156, 1159, 1169, 1171, 1176, 1183, 1185, 1195, 1199,
  10423. 1209, 1212, 1216, 1218, 1221, 1225, 1234, 1236, 1241, 1243, 1250, 1256, 1270, 1281, 1287, 1296,
  10424. 1299, 1306, 1309, 1313, 1338, 1341, 1348, 1353, 1362, 1375, 1376, 1387, 1400, 1408, 1410, 1415,
  10425. 1425, 1453, 1457, 1477, 1481, 1494, 1496, 1507, 1512, 1538, 1545, 1547, 1549, 1551, 1554, 1561,
  10426. 1563, 1565, 1570, 1572, 1575, 1577, 1587, 1593, 1601, 1603, 1605, 1612, 1617, 1619, 1632, 1648,
  10427. 1658, 1662, 1664, 1674, 1680, 1690, 1692, 1704, 1729, 1736, 1740, 1745, 1747, 1751, 1752, 1761,
  10428. 1763, 1767, 1773, 1787, 1795, 1801, 1806, 1810, 1817, 1834, 1840, 1844, 1857, 1864, 1866, 1877,
  10429. 1882, 1892, 1902, 1915, 1934, 1953, 1985, 1987, 2000, 2002, 2013, 2048, 2052, 2058, 2064, 2068,
  10430. 2071, 2074, 2081, 2088, 2104, 2114, 2119, 2121, 2123, 2130, 2136, 2141, 2147, 2153, 2157, 2177,
  10431. 2179, 2184, 2189, 2193, 2203, 2208, 2223, 2226, 2232, 2244, 2249, 2251, 2256, 2258, 2265, 2269,
  10432. 2304, 2306, 2324, 2335, 2336, 2361, 2373, 2375, 2385, 2418, 2443, 2460, 2480, 2504, 2509, 2520,
  10433. 2531, 2537, 2562, 2568, 2572, 2578, 2592, 2596, 2599, 2602, 2614, 2620, 2625, 2627, 2629, 2634,
  10434. 2641, 2650, 2682, 2688, 2697, 2707, 2712, 2718, 2731, 2754, 2759, 2760, 2775, 2788, 2793, 2805,
  10435. 2811, 2817, 2820, 2832, 2842, 2854, 2890, 2902, 2921, 2923, 2978, 3010, 3012, 3026, 3081, 3083,
  10436. 3085, 3097, 3099, 3120, 3136, 3152, 3159, 3188, 3210, 3228, 3234, 3245, 3250, 3256, 3264, 3276,
  10437. 3281, 3296, 3349, 3363, 3378, 3392, 3395, 3420, 3440, 3461, 3488, 3529, 3531, 3584, 3588, 3591,
  10438. 3600, 3602, 3614, 3616, 3628, 3634, 3650, 3657, 3668, 3683, 3685, 3713, 3716, 3720, 3726, 3729,
  10439. 3736, 3753, 3778, 3802, 3805, 3819, 3841, 3845, 3851, 3856, 3880, 3922, 3938, 3970, 3993, 4032,
  10440. };
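// Each 16-bit entry of the 3-bit grids above packs four 3-bit levels (bits 3*i..3*i+2 for
// lane i); decoding maps level l to the odd coordinate 2*l + 1, so grid points live on
// {1,3,...,15}^4 and kmap_size = 8^4 = 4096 covers every possible packed code.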
  10441. const int kmap_size = 4096;
  10442. const int nwant = grid_size == 256 ? 2 : 3;
  10443. const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512;
  10444. uint32_t * kgrid_q3xs;
  10445. int * kmap_q3xs;
  10446. uint16_t * kneighbors_q3xs;
  10447. //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
  10448. uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
  10449. for (int k = 0; k < grid_size; ++k) {
  10450. int8_t * pos = (int8_t *)(the_grid + k);
  10451. for (int i = 0; i < 4; ++i) {
  10452. int l = (kgrid[k] >> 3*i) & 0x7;
  10453. pos[i] = 2*l + 1;
  10454. }
  10455. }
  10456. kgrid_q3xs = the_grid;
  10457. iq3_data[gindex].grid = the_grid;
  10458. kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
  10459. iq3_data[gindex].map = kmap_q3xs;
  10460. for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
  10461. uint32_t aux32;
  10462. uint8_t * aux8 = (uint8_t *)&aux32;
  10463. for (int i = 0; i < grid_size; ++i) {
  10464. aux32 = kgrid_q3xs[i];
  10465. uint16_t index = 0;
  10466. for (int k=0; k<4; ++k) {
  10467. uint16_t q = (aux8[k] - 1)/2;
  10468. index |= (q << 3*k);
  10469. }
  10470. kmap_q3xs[index] = i;
  10471. }
  10472. int8_t pos[4];
  10473. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  10474. int num_neighbors = 0, num_not_in_map = 0;
  10475. for (int i = 0; i < kmap_size; ++i) {
  10476. if (kmap_q3xs[i] >= 0) continue;
  10477. ++num_not_in_map;
  10478. for (int k = 0; k < 4; ++k) {
  10479. int l = (i >> 3*k) & 0x7;
  10480. pos[k] = 2*l + 1;
  10481. }
  10482. for (int j = 0; j < grid_size; ++j) {
  10483. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  10484. int d2 = 0;
  10485. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  10486. dist2[2*j+0] = d2;
  10487. dist2[2*j+1] = j;
  10488. }
  10489. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  10490. int n = 0; int d2 = dist2[0];
  10491. int nhave = 1;
  10492. for (int j = 0; j < grid_size; ++j) {
  10493. if (dist2[2*j] > d2) {
  10494. if (nhave == nwant) break;
  10495. d2 = dist2[2*j];
  10496. ++nhave;
  10497. }
  10498. ++n;
  10499. }
  10500. num_neighbors += n;
  10501. }
  10502. //printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  10503. kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  10504. iq3_data[gindex].neighbours = kneighbors_q3xs;
  10505. int counter = 0;
  10506. for (int i = 0; i < kmap_size; ++i) {
  10507. if (kmap_q3xs[i] >= 0) continue;
  10508. for (int k = 0; k < 4; ++k) {
  10509. int l = (i >> 3*k) & 0x7;
  10510. pos[k] = 2*l + 1;
  10511. }
  10512. for (int j = 0; j < grid_size; ++j) {
  10513. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  10514. int d2 = 0;
  10515. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  10516. dist2[2*j+0] = d2;
  10517. dist2[2*j+1] = j;
  10518. }
  10519. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  10520. kmap_q3xs[i] = -(counter + 1);
  10521. int d2 = dist2[0];
  10522. uint16_t * start = &kneighbors_q3xs[counter++];
  10523. int n = 0, nhave = 1;
  10524. for (int j = 0; j < grid_size; ++j) {
  10525. if (dist2[2*j] > d2) {
  10526. if (nhave == nwant) break;
  10527. d2 = dist2[2*j];
  10528. ++nhave;
  10529. }
  10530. kneighbors_q3xs[counter++] = dist2[2*j+1];
  10531. ++n;
  10532. }
  10533. *start = n;
  10534. }
  10535. free(dist2);
  10536. }
  10537. void iq3xs_free_impl(int grid_size) {
  10538. GGML_ASSERT(grid_size == 256 || grid_size == 512);
  10539. const int gindex = iq3_data_index(grid_size);
  10540. if (iq3_data[gindex].grid) {
  10541. free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL;
  10542. free(iq3_data[gindex].map); iq3_data[gindex].map = NULL;
  10543. free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
  10544. }
  10545. }
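// 4-d counterpart of iq2_find_best_neighbour: pick the neighbour minimizing the weighted
// squared error between scale*grid and xval, write its 3-bit levels to L, return its index.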
  10546. static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
  10547. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  10548. int num_neighbors = neighbours[0];
  10549. GGML_ASSERT(num_neighbors > 0);
  10550. float best_d2 = FLT_MAX;
  10551. int grid_index = -1;
  10552. for (int j = 1; j <= num_neighbors; ++j) {
  10553. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  10554. float d2 = 0;
  10555. for (int i = 0; i < 4; ++i) {
  10556. float q = pg[i];
  10557. float diff = scale*q - xval[i];
  10558. d2 += weight[i]*diff*diff;
  10559. }
  10560. if (d2 < best_d2) {
  10561. best_d2 = d2; grid_index = neighbours[j];
  10562. }
  10563. }
  10564. GGML_ASSERT(grid_index >= 0);
  10565. const int8_t * pg = (const int8_t *)(grid + grid_index);
  10566. for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
  10567. return grid_index;
  10568. }
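// Quantize one row to IQ3_XXS (grid_size 256, block_iq3_xxs output) or to the IQ3_S layout
// (grid_size 512, block_iq3_s). Groups of 4 values are mapped to the D4-lattice grid with
// levels 0..kMaxQ-1; signs use the same 7-bit-per-group parity trick as IQ2, and each
// 32-value sub-block packs its four 7-bit sign masks plus a 4-bit scale into one uint32.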
  10569. static void quantize_row_iq3_xxs_impl(int grid_size, const float * restrict x, void * restrict vy, int64_t n,
  10570. const float * restrict quant_weights) {
  10571. const int gindex = iq3_data_index(grid_size);
  10572. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  10573. const int * kmap_q3xs = iq3_data[gindex].map;
  10574. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  10575. //GGML_ASSERT(quant_weights && "missing quantization weights");
  10576. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  10577. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  10578. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  10579. GGML_ASSERT(n%QK_K == 0);
  10580. const int kMaxQ = 8;
  10581. const int64_t nbl = n/QK_K;
  10582. ggml_fp16_t * dh;
  10583. uint8_t * qs;
  10584. int block_size;
  10585. if (grid_size == 256) {
  10586. block_iq3_xxs * y = vy;
  10587. dh = &y->d;
  10588. qs = y->qs;
  10589. block_size = sizeof(block_iq3_xxs);
  10590. } else {
  10591. block_iq3_s * y = vy;
  10592. dh = &y->d;
  10593. qs = y->qs;
  10594. block_size = sizeof(block_iq3_s);
  10595. }
  10596. int quant_size = block_size - sizeof(ggml_fp16_t);
  10597. float scales[QK_K/32];
  10598. float weight[32];
  10599. float xval[32];
  10600. int8_t L[32];
  10601. int8_t Laux[32];
  10602. float waux[32];
  10603. bool is_on_grid[8];
  10604. bool is_on_grid_aux[8];
  10605. uint8_t block_signs[8];
  10606. uint8_t q3[3*(QK_K/8)+QK_K/32];
  10607. uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);
  10608. uint8_t * qh = q3 + 3*(QK_K/8);
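// Scratch layout for one super-block: q3[0 .. QK_K/4-1] holds the grid indices (8 per
// 32-value sub-block), the next QK_K/8 bytes are the per-sub-block sign/scale words
// (scales_and_signs), and the final QK_K/32 bytes (qh) hold the 9th index bit of each
// group when the 512-entry grid is used.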
  10609. for (int ibl = 0; ibl < nbl; ++ibl) {
  10610. dh[0] = GGML_FP32_TO_FP16(0.f);
  10611. memset(q3, 0, 3*QK_K/8+QK_K/32);
  10612. float max_scale = 0;
  10613. const float * xbl = x + QK_K*ibl;
  10614. float sumx2 = 0;
  10615. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  10616. float sigma2 = 2*sumx2/QK_K;
  10617. for (int ib = 0; ib < QK_K/32; ++ib) {
  10618. const float * xb = xbl + 32*ib;
  10619. if (quant_weights) {
  10620. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  10621. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  10622. } else {
  10623. for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
  10624. }
  10625. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
  10626. for (int k = 0; k < 4; ++k) {
  10627. int nflip = 0;
  10628. uint8_t s = 0;
  10629. for (int i = 0; i < 8; ++i) {
  10630. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  10631. else {
  10632. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  10633. }
  10634. }
  10635. if (nflip%2) {
  10636. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  10637. for (int i = 1; i < 8; ++i) {
  10638. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  10639. if (ax < min) {
  10640. min = ax; imin = i;
  10641. }
  10642. }
  10643. xval[8*k+imin] = -xval[8*k+imin];
  10644. s ^= (1 << imin);
  10645. }
  10646. block_signs[k] = s & 127;
  10647. }
  10648. float max = xval[0];
  10649. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  10650. if (max < GROUP_MAX_EPS_IQ3_XXS) {
  10651. scales[ib] = 0;
  10652. memset(L, 0, 32);
  10653. continue;
  10654. }
  10655. float best = 0;
  10656. float scale = max/(2*kMaxQ-1);
  10657. for (int is = -15; is <= 15; ++is) {
  10658. float id = (2*kMaxQ-1+is*0.2f)/max;
  10659. float this_scale = 1/id;
  10660. for (int k = 0; k < 8; ++k) {
  10661. for (int i = 0; i < 4; ++i) {
  10662. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  10663. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  10664. }
  10665. uint16_t u = 0;
  10666. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  10667. int grid_index = kmap_q3xs[u];
  10668. is_on_grid_aux[k] = true;
  10669. if (grid_index < 0) {
  10670. is_on_grid_aux[k] = false;
  10671. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  10672. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  10673. }
  10674. }
  10675. float sumqx = 0, sumq2 = 0;
  10676. for (int i = 0; i < 32; ++i) {
  10677. float w = weight[i];
  10678. float q = 2*Laux[i] + 1;
  10679. sumqx += w*xval[i]*q;
  10680. sumq2 += w*q*q;
  10681. }
  10682. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  10683. scale = sumqx/sumq2; best = scale*sumqx;
  10684. for (int i = 0; i < 32; ++i) L[i] = Laux[i];
  10685. for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
  10686. }
  10687. }
  10688. int n_not_ongrid = 0;
  10689. for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  10690. if (n_not_ongrid > 0 && scale > 0) {
  10691. float id = 1/scale;
  10692. for (int k = 0; k < 8; ++k) {
  10693. if (is_on_grid[k]) continue;
  10694. uint16_t u = 0;
  10695. for (int i = 0; i < 4; ++i) {
  10696. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  10697. l = MAX(0, MIN(kMaxQ-1, l));
  10698. u |= (l << 3*i);
  10699. }
  10700. int grid_index = kmap_q3xs[u];
  10701. if (grid_index < 0) {
  10702. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  10703. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  10704. }
  10705. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  10706. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  10707. }
  10708. float sumqx = 0, sumq2 = 0;
  10709. for (int i = 0; i < 32; ++i) {
  10710. float w = weight[i];
  10711. float q = 2*L[i] + 1;
  10712. sumqx += w*xval[i]*q;
  10713. sumq2 += w*q*q;
  10714. }
  10715. if (sumq2 > 0) scale = sumqx/sumq2;
  10716. }
  10717. if (scale < 0) {
  10718. // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
  10719. // and correspondingly flip quant signs.
  10720. scale = -scale;
  10721. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  10722. }
  10723. for (int k = 0; k < 8; ++k) {
  10724. uint16_t u = 0;
  10725. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  10726. int grid_index = kmap_q3xs[u];
  10727. if (grid_index < 0) {
  10728. printf("Oops: found point %u not on grid:", u);
  10729. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  10730. printf("\n");
  10731. GGML_ASSERT(false);
  10732. }
  10733. if (grid_size == 256) {
  10734. q3[8*ib+k] = grid_index;
  10735. } else {
  10736. q3[8*ib+k] = grid_index & 255;
  10737. qh[ib] |= ((grid_index >> 8) << k);
  10738. }
  10739. }
  10740. scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
  10741. GGML_ASSERT(scale >= 0);
  10742. scales[ib] = scale;
  10743. max_scale = MAX(max_scale, scale);
  10744. }
  10745. if (!max_scale) {
  10746. memset(qs, 0, quant_size);
  10747. dh += block_size/sizeof(ggml_fp16_t);
  10748. qs += block_size;
  10749. continue;
  10750. }
  10751. float d = max_scale/31;
  10752. dh[0] = GGML_FP32_TO_FP16(d * 1.0125f); // small improvement via this fudge factor
  10753. float id = 1/d;
  10754. for (int ib = 0; ib < QK_K/32; ++ib) {
  10755. int l = nearest_int(0.5f*(id*scales[ib]-1));
  10756. l = MAX(0, MIN(15, l));
  10757. scales_and_signs[ib] |= ((uint32_t)l << 28);
  10758. }
  10759. memcpy(qs, q3, quant_size);
  10760. dh += block_size/sizeof(ggml_fp16_t);
  10761. qs += block_size;
  10762. }
  10763. }
  10764. size_t quantize_iq3_xxs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  10765. GGML_ASSERT(n_per_row%QK_K == 0);
  10766. int64_t nblock = n_per_row/QK_K;
  10767. char * qrow = (char *)dst;
  10768. for (int64_t row = 0; row < nrow; ++row) {
  10769. quantize_row_iq3_xxs_impl(256, src, qrow, n_per_row, quant_weights);
  10770. src += n_per_row;
  10771. qrow += nblock*sizeof(block_iq3_xxs);
  10772. }
  10773. return nrow * nblock * sizeof(block_iq3_xxs);
  10774. }
  10775. void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int64_t k) {
  10776. assert(k % QK_K == 0);
  10777. block_iq3_xxs * restrict y = vy;
  10778. quantize_row_iq3_xxs_reference(x, y, k);
  10779. }
  10780. void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int64_t k) {
  10781. assert(k % QK_K == 0);
  10782. quantize_row_iq3_xxs_impl(256, x, y, k, NULL);
  10783. }
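// IQ3_S-specific row quantization. Uses the 512-entry grid and caller-provided scratch
// buffers (scales, weight, ..., block_signs). Unlike the IQ2/IQ3_XXS paths, all 8 sign bits
// of each group of 8 are kept (the block has a separate signs[] array, see the qs/qh/signs
// pointers taken below), so no parity adjustment of the sign masks is needed.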
  10784. static void quantize_row_iq3_s_impl(int block_size, const float * restrict x, void * restrict vy, int n,
  10785. const float * restrict quant_weights,
  10786. float * scales,
  10787. float * weight,
  10788. float * xval,
  10789. int8_t * L,
  10790. int8_t * Laux,
  10791. float * waux,
  10792. bool * is_on_grid,
  10793. bool * is_on_grid_aux,
  10794. uint8_t * block_signs) {
  10795. const int gindex = iq3_data_index(512);
  10796. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  10797. const int * kmap_q3xs = iq3_data[gindex].map;
  10798. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  10799. //GGML_ASSERT(quant_weights && "missing quantization weights");
  10800. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  10801. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  10802. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  10803. GGML_ASSERT(n%QK_K == 0);
  10804. const int kMaxQ = 8;
  10805. const int64_t nbl = n/QK_K;
  10806. block_iq3_s * y = vy;
  10807. const int bs4 = block_size/4;
  10808. const int bs8 = block_size/8;
  10809. for (int ibl = 0; ibl < nbl; ++ibl) {
  10810. memset(&y[ibl], 0, sizeof(block_iq3_s));
  10811. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  10812. uint8_t * qs = y[ibl].qs;
  10813. uint8_t * qh = y[ibl].qh;
  10814. uint8_t * signs = y[ibl].signs;
  10815. float max_scale = 0;
  10816. const float * xbl = x + QK_K*ibl;
  10817. float sumx2 = 0;
  10818. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  10819. float sigma2 = 2*sumx2/QK_K;
  10820. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  10821. const float * xb = xbl + block_size*ib;
  10822. if (quant_weights) {
  10823. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  10824. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  10825. } else {
  10826. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  10827. }
  10828. for (int i = 0; i < block_size; ++i) waux[i] = sqrtf(weight[i]);
  10829. for (int k = 0; k < bs8; ++k) {
  10830. uint8_t s = 0;
  10831. for (int i = 0; i < 8; ++i) {
  10832. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  10833. else {
  10834. xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
  10835. }
  10836. }
  10837. block_signs[k] = s;
  10838. }
  10839. float max = xval[0];
  10840. for (int i = 1; i < block_size; ++i) max = MAX(max, xval[i]);
  10841. if (!max) {
  10842. scales[ib] = 0;
  10843. continue;
  10844. }
  10845. float best = 0;
  10846. float scale = max/(2*kMaxQ-1);
  10847. for (int k = 0; k < bs4; ++k) is_on_grid[k] = false;
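// Try 19 candidate scales around max/(2*kMaxQ-1): for each, snap every group of 4 quants to
// the nearest point of the iq3 grid and keep the scale that maximizes sumqx^2/sumq2.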
  10848. for (int is = -9; is <= 9; ++is) {
  10849. float id = (2*kMaxQ-1+is*0.2f)/max;
  10850. float this_scale = 1/id;
  10851. for (int k = 0; k < bs4; ++k) {
  10852. for (int i = 0; i < 4; ++i) {
  10853. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  10854. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  10855. }
  10856. uint16_t u = 0;
  10857. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  10858. int grid_index = kmap_q3xs[u];
  10859. is_on_grid_aux[k] = true;
  10860. if (grid_index < 0) {
  10861. is_on_grid_aux[k] = false;
  10862. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  10863. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  10864. }
  10865. }
  10866. float sumqx = 0, sumq2 = 0;
  10867. for (int i = 0; i < block_size; ++i) {
  10868. float w = weight[i];
  10869. float q = 2*Laux[i] + 1;
  10870. sumqx += w*xval[i]*q;
  10871. sumq2 += w*q*q;
  10872. }
  10873. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  10874. scale = sumqx/sumq2; best = scale*sumqx;
  10875. for (int i = 0; i < block_size; ++i) L[i] = Laux[i];
  10876. for (int k = 0; k < bs4; ++k) is_on_grid[k] = is_on_grid_aux[k];
  10877. }
  10878. }
  10879. int n_not_ongrid = 0;
  10880. for (int k = 0; k < bs4; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  10881. if (n_not_ongrid > 0 && scale > 0) {
  10882. float id = 1/scale;
  10883. for (int k = 0; k < bs4; ++k) {
  10884. //if (is_on_grid[k]) continue;
  10885. uint16_t u = 0;
  10886. for (int i = 0; i < 4; ++i) {
  10887. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  10888. l = MAX(0, MIN(kMaxQ-1, l));
  10889. u |= (l << 3*i);
  10890. }
  10891. int grid_index = kmap_q3xs[u];
  10892. if (grid_index < 0) {
  10893. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  10894. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  10895. }
  10896. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  10897. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  10898. }
  10899. float sumqx = 0, sumq2 = 0;
  10900. for (int i = 0; i < block_size; ++i) {
  10901. float w = weight[i];
  10902. float q = 2*L[i] + 1;
  10903. sumqx += w*xval[i]*q;
  10904. sumq2 += w*q*q;
  10905. }
  10906. if (sumq2 > 0) scale = sumqx/sumq2;
  10907. }
  10908. if (scale < 0) {
  10909. // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale)
  10910. // and correspondingly flip quant signs.
  10911. scale = -scale;
  10912. for (int k = 0; k < bs8; ++k) block_signs[k] = ~block_signs[k];
  10913. }
  10914. for (int k = 0; k < bs4; ++k) {
  10915. uint16_t u = 0;
  10916. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  10917. int grid_index = kmap_q3xs[u];
  10918. if (grid_index < 0) {
  10919. printf("Oops: found point %u not on grid:", u);
  10920. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  10921. printf("\n");
  10922. GGML_ASSERT(false);
  10923. }
  10924. qs[k] = grid_index & 255;
  10925. qh[(ib*bs4+k)/8] |= ((grid_index >> 8) << ((ib*bs4+k)%8));
  10926. }
  10927. qs += bs4;
  10928. for (int k = 0; k < bs8; ++k) signs[k] = block_signs[k];
  10929. signs += bs8;
  10930. GGML_ASSERT(scale >= 0);
  10931. scales[ib] = scale;
  10932. max_scale = MAX(max_scale, scale);
  10933. }
  10934. if (!max_scale) {
  10935. continue;
  10936. }
  10937. float d = max_scale/31;
  10938. y[ibl].d = GGML_FP32_TO_FP16(d * 1.033f);
  10939. float id = 1/d;
  10940. for (int ib = 0; ib < QK_K/block_size; ib += 2) {
  10941. int l1 = nearest_int(0.5f*(id*scales[ib+0]-1));
  10942. l1 = MAX(0, MIN(15, l1));
  10943. int l2 = nearest_int(0.5f*(id*scales[ib+1]-1));
  10944. l2 = MAX(0, MIN(15, l2));
  10945. y[ibl].scales[ib/2] = l1 | (l2 << 4);
  10946. }
  10947. }
  10948. }
  10949. #define IQ3S_BLOCK_SIZE 32
  10950. size_t quantize_iq3_s(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  10951. GGML_ASSERT(n_per_row%QK_K == 0);
  10952. int64_t nblock = n_per_row/QK_K;
  10953. float scales[QK_K/IQ3S_BLOCK_SIZE];
  10954. float weight[IQ3S_BLOCK_SIZE];
  10955. float xval[IQ3S_BLOCK_SIZE];
  10956. int8_t L[IQ3S_BLOCK_SIZE];
  10957. int8_t Laux[IQ3S_BLOCK_SIZE];
  10958. float waux[IQ3S_BLOCK_SIZE];
  10959. bool is_on_grid[IQ3S_BLOCK_SIZE/4];
  10960. bool is_on_grid_aux[IQ3S_BLOCK_SIZE/4];
  10961. uint8_t block_signs[IQ3S_BLOCK_SIZE/8];
  10962. char * qrow = (char *)dst;
  10963. for (int64_t row = 0; row < nrow; ++row) {
  10964. quantize_row_iq3_s_impl(IQ3S_BLOCK_SIZE, src, qrow, n_per_row, quant_weights,
  10965. scales, weight, xval, L, Laux, waux, is_on_grid, is_on_grid_aux, block_signs);
  10966. src += n_per_row;
  10967. qrow += nblock*sizeof(block_iq3_s);
  10968. }
  10969. return nrow * nblock * sizeof(block_iq3_s);
  10970. }
  10971. void quantize_row_iq3_s(const float * restrict x, void * restrict vy, int64_t k) {
  10972. assert(k % QK_K == 0);
  10973. block_iq3_s * restrict y = vy;
  10974. quantize_row_iq3_s_reference(x, y, k);
  10975. }
  10976. void quantize_row_iq3_s_reference(const float * restrict x, block_iq3_s * restrict y, int64_t k) {
  10977. assert(k % QK_K == 0);
  10978. quantize_iq3_s(x, y, 1, k, NULL);
  10979. }
  10980. // =================================== 1.5 bpw ===================================================
  10981. static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  10982. const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) {
  10983. int num_neighbors = neighbours[0];
  10984. GGML_ASSERT(num_neighbors > 0);
  10985. float best_score = 0;
  10986. int grid_index = -1;
  10987. for (int j = 1; j <= num_neighbors; ++j) {
  10988. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  10989. float sumqx = 0, sumq2 = 0;
  10990. for (int i = 0; i < 8; ++i) {
  10991. float q = (pg[i] - 3)/2;
  10992. float w = weight[i];
  10993. sumqx += w*q*xval[i];
  10994. sumq2 += w*q*q;
  10995. }
  10996. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  10997. *scale = sumqx/sumq2; best_score = *scale * sumqx;
  10998. grid_index = neighbours[j];
  10999. }
  11000. }
  11001. if (grid_index < 0) {
  11002. for (int i = 0; i < ngrid; ++i) {
  11003. const int8_t * grid_i = (const int8_t *)(grid + i);
  11004. float sumqx = 0, sumq2 = 0;
  11005. for (int j = 0; j < 8; ++j) {
  11006. float w = weight[j];
  11007. float q = (grid_i[j] - 3)/2;
  11008. sumqx += w*q*xval[j];
  11009. sumq2 += w*q*q;
  11010. }
  11011. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  11012. *scale = sumqx/sumq2; best_score = *scale*sumqx;
  11013. grid_index = i;
  11014. }
  11015. }
  11016. }
  11017. if (grid_index < 0) {
  11018. printf("Oops, did not find grid point\n");
  11019. printf("Have %d neighbours\n", num_neighbors);
  11020. for (int j = 1; j <= num_neighbors; ++j) {
  11021. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  11022. float sumqx = 0, sumq2 = 0;
  11023. for (int i = 0; i < 8; ++i) {
  11024. float q = (pg[i] - 3)/2;
  11025. float w = weight[i];
  11026. sumqx += w*q*xval[i];
  11027. sumq2 += w*q*q;
  11028. }
  11029. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  11030. }
  11031. }
  11032. GGML_ASSERT(grid_index >= 0);
  11033. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  11034. *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result.
  11035. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  11036. const int8_t * pg = (const int8_t *)(grid + grid_index);
  11037. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  11038. return grid_index;
  11039. }
  11040. static int iq1_find_best_neighbour2(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  11041. const float * restrict xval, const float * restrict weight, float scale, const float * restrict xg, int8_t * restrict L, int ngrid) {
  11042. int num_neighbors = neighbours[0];
  11043. GGML_ASSERT(num_neighbors > 0);
  11044. float best_score = FLT_MAX;
  11045. int grid_index = -1;
  11046. for (int j = 1; j <= num_neighbors; ++j) {
  11047. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  11048. float d2 = 0;
  11049. for (int i = 0; i < 8; ++i) {
  11050. float q = xg[(pg[i] - 1)/2];
  11051. float w = weight[i];
  11052. float diff = scale*q - xval[i];
  11053. d2 += w*diff*diff;
  11054. }
  11055. if (d2 < best_score) {
  11056. best_score = d2;
  11057. grid_index = neighbours[j];
  11058. }
  11059. }
  11060. if (grid_index < 0) {
  11061. for (int i = 0; i < ngrid; ++i) {
  11062. const int8_t * grid_i = (const int8_t *)(grid + i);
  11063. float d2 = 0;
  11064. for (int j = 0; j < 8; ++j) {
  11065. float w = weight[j];
  11066. float q = xg[(grid_i[j] - 1)/2];
11067. float diff = scale*q - xval[j];
  11068. d2 += w*diff*diff;
  11069. }
  11070. if (d2 < best_score) {
  11071. best_score = d2;
  11072. grid_index = i;
  11073. }
  11074. }
  11075. }
  11076. if (grid_index < 0) {
  11077. printf("Oops, did not find grid point\n");
  11078. printf("Have %d neighbours\n", num_neighbors);
  11079. for (int j = 1; j <= num_neighbors; ++j) {
  11080. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  11081. float sumqx = 0, sumq2 = 0;
  11082. for (int i = 0; i < 8; ++i) {
  11083. float q = xg[(pg[i] - 1)/2];
  11084. float w = weight[i];
  11085. sumqx += w*q*xval[i];
  11086. sumq2 += w*q*q;
  11087. }
  11088. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  11089. }
  11090. }
  11091. GGML_ASSERT(grid_index >= 0);
  11092. const int8_t * pg = (const int8_t *)(grid + grid_index);
  11093. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  11094. return grid_index;
  11095. }
  11096. static int iq1_sort_helper(const void * left, const void * right) {
  11097. const float * l = left;
  11098. const float * r = right;
  11099. return *l < *r ? -1 : *l > *r ? 1 : 0;
  11100. }
  11101. #define IQ1S_BLOCK_SIZE 32
  11102. #define IQ1M_BLOCK_SIZE 16
  11103. static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights,
  11104. float * scales,
  11105. float * weight,
  11106. float * sumx,
  11107. float * sumw,
  11108. float * pairs,
  11109. int8_t * L,
  11110. uint16_t * index,
  11111. int8_t * shifts) {
  11112. const int gindex = iq2_data_index(GGML_TYPE_IQ1_S);
  11113. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  11114. const int * kmap_q2xs = iq2_data[gindex].map;
  11115. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  11116. GGML_ASSERT(quant_weights && "missing quantization weights");
  11117. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  11118. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  11119. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  11120. GGML_ASSERT(n%QK_K == 0);
  11121. block_iq1_s * y = vy;
  11122. const int64_t nbl = n/QK_K;
  11123. const int block_size = IQ1S_BLOCK_SIZE;
  11124. const float x_p[3] = {-1 + IQ1S_DELTA, IQ1S_DELTA, 1 + IQ1S_DELTA};
  11125. const float x_m[3] = {-1 - IQ1S_DELTA, -IQ1S_DELTA, 1 - IQ1S_DELTA};
  11126. int * idx = (int *)(pairs + 1);
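// pairs[] interleaves {value, index}: pairs[2*j] holds xb[j] while the integer index j is
// stored in the bit pattern of pairs[2*j+1] through the aliased idx pointer, so the qsort
// over 2*sizeof(float) strides below keeps each index attached to its value.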
  11127. for (int ibl = 0; ibl < nbl; ++ibl) {
  11128. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  11129. memset(y[ibl].qs, 0, QK_K/8);
  11130. memset(y[ibl].qh, 0, QK_K/16);
  11131. float max_scale = 0;
  11132. const float * xbl = x + QK_K*ibl;
  11133. float sumx2 = 0;
  11134. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  11135. float sigma2 = 2*sumx2/QK_K;
  11136. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  11137. const float * xb = xbl + block_size*ib;
  11138. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  11139. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  11140. float max = fabsf(xb[0]);
  11141. for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i]));
  11142. if (max < GROUP_MAX_EPS_IQ1_S) {
  11143. scales[ib] = 0;
  11144. memset(L, 1, block_size);
  11145. continue;
  11146. }
11147. // Here we solve exactly the weighted sum of squared differences (SSD) minimization problem.
11148. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
11149. // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
11150. // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and
11151. // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale
11152. // and score for each possible split.
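// For a split (i1, i2) -- the i1 smallest values at the lowest level, the next i2-i1 at the
// middle level, the rest at the highest -- the weighted error
//     E(s) = sum_j w_j*(x_j - s*q_j)^2 = sum_j w_j*x_j^2 - 2*s*sumqx + s^2*sumq2
// is minimized at s = sumqx/sumq2, leaving E = sum_j w_j*x_j^2 - sumqx^2/sumq2. The loop
// below therefore just maximizes sumqx^2/sumq2 (tracked as best_score = scale*sumqx), with
// sumqx and sumq2 obtained in O(1) per split from the prefix sums sumx[] and sumw[].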
  11153. for (int j = 0; j < block_size; ++j) {
  11154. pairs[2*j] = xb[j];
  11155. idx[2*j] = j;
  11156. }
  11157. qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper);
  11158. {
  11159. sumx[0] = sumw[0] = 0;
  11160. for (int j = 0; j < block_size; ++j) {
  11161. int i = idx[2*j];
  11162. sumx[j+1] = sumx[j] + weight[i]*xb[i];
  11163. sumw[j+1] = sumw[j] + weight[i];
  11164. }
  11165. }
  11166. float best_score = 0, scale = max;
  11167. int besti1 = -1, besti2 = -1, best_shift = 0;
  11168. for (int i1 = 0; i1 <= block_size; ++i1) {
  11169. for (int i2 = i1; i2 <= block_size; ++i2) {
  11170. float sumqx = (sumx[i1] - sumx[0])*x_p[0] + (sumx[i2] - sumx[i1])*x_p[1] + (sumx[block_size] - sumx[i2])*x_p[2];
  11171. float sumq2 = (sumw[i1] - sumw[0])*x_p[0]*x_p[0] + (sumw[i2] - sumw[i1])*x_p[1]*x_p[1] + (sumw[block_size] - sumw[i2])*x_p[2]*x_p[2];
  11172. if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  11173. scale = sumqx/sumq2; best_score = scale*sumqx;
  11174. besti1 = i1; besti2 = i2; best_shift = 1;
  11175. }
  11176. sumqx = (sumx[i1] - sumx[0])*x_m[0] + (sumx[i2] - sumx[i1])*x_m[1] + (sumx[block_size] - sumx[i2])*x_m[2];
  11177. sumq2 = (sumw[i1] - sumw[0])*x_m[0]*x_m[0] + (sumw[i2] - sumw[i1])*x_m[1]*x_m[1] + (sumw[block_size] - sumw[i2])*x_m[2]*x_m[2];
  11178. if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  11179. scale = sumqx/sumq2; best_score = scale*sumqx;
  11180. besti1 = i1; besti2 = i2; best_shift = -1;
  11181. }
  11182. }
  11183. }
  11184. GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_shift != 0);
  11185. for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
  11186. for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
  11187. for (int j = besti2; j < block_size; ++j) L[idx[2*j]] = 2;
  11188. if (scale < 0) {
  11189. for (int j = 0; j < block_size; ++j) L[j] = 2 - L[j];
  11190. scale = -scale; best_shift = -best_shift;
  11191. }
  11192. bool all_on_grid = true;
  11193. const float * xx = best_shift == 1 ? x_p : x_m;
  11194. for (int k = 0; k < block_size/8; ++k) {
  11195. uint16_t u = 0;
  11196. for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j);
  11197. int grid_index = kmap_q2xs[u];
  11198. if (grid_index < 0) {
  11199. all_on_grid = false;
  11200. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  11201. grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S);
  11202. GGML_ASSERT(grid_index >= 0);
  11203. }
  11204. index[k] = grid_index;
  11205. }
  11206. if (!all_on_grid) {
  11207. float sumqx = 0, sumq2 = 0;
  11208. for (int k = 0; k < block_size/8; ++k) {
  11209. const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]);
  11210. for (int j = 0; j < 8; ++j) {
  11211. float w = weight[8*k + j];
  11212. float q = xx[(pg[j] - 1)/2];
  11213. sumqx += w*q*xb[8*k+j];
  11214. sumq2 += w*q*q;
  11215. }
  11216. }
  11217. if (sumqx > 0 && sumq2 > 0) scale = sumqx/sumq2;
  11218. }
  11219. uint16_t h = 0;
  11220. for (int k = 0; k < block_size/8; ++k) {
  11221. y[ibl].qs[(block_size/8)*ib + k] = index[k] & 255;
  11222. h |= (index[k] >> 8) << 3*k;
  11223. }
  11224. y[ibl].qh[ib] = h;
  11225. GGML_ASSERT(scale >= 0);
  11226. scales[ib] = scale;
  11227. shifts[ib] = best_shift;
  11228. max_scale = MAX(max_scale, scale);
  11229. }
  11230. if (!max_scale) {
  11231. continue;
  11232. }
  11233. float d = max_scale/15;
  11234. y[ibl].d = GGML_FP32_TO_FP16(d*1.125f); // 1.125f is another fudge factor. Don't ask me why it is needed.
  11235. float id = 1/d;
  11236. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  11237. int l = nearest_int(0.5f*(id*scales[ib]-1));
  11238. l = MAX(0, MIN(7, l));
  11239. if (shifts[ib] == -1) l |= 8;
  11240. y[ibl].qh[ib] |= (l << 12);
  11241. }
  11242. }
  11243. }
  11244. size_t quantize_iq1_s(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  11245. GGML_ASSERT(n_per_row%QK_K == 0);
  11246. float scales[QK_K/IQ1S_BLOCK_SIZE];
  11247. float weight[IQ1S_BLOCK_SIZE];
  11248. int8_t L[IQ1S_BLOCK_SIZE];
  11249. float sumx[IQ1S_BLOCK_SIZE+1];
  11250. float sumw[IQ1S_BLOCK_SIZE+1];
  11251. float pairs[2*IQ1S_BLOCK_SIZE];
  11252. uint16_t index[IQ1S_BLOCK_SIZE/8];
  11253. int8_t shifts[QK_K/IQ1S_BLOCK_SIZE];
  11254. int64_t nblock = n_per_row/QK_K;
  11255. char * qrow = (char *)dst;
  11256. for (int64_t row = 0; row < nrow; ++row) {
  11257. quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights, scales, weight, sumx, sumw, pairs, L, index, shifts);
  11258. src += n_per_row;
  11259. qrow += nblock*sizeof(block_iq1_s);
  11260. }
  11261. return nrow * nblock * sizeof(block_iq1_s);
  11262. }
  11263. static void quantize_row_iq1_m_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights,
  11264. float * scales,
  11265. float * weight,
  11266. float * pairs,
  11267. int8_t * L,
  11268. uint16_t * index,
  11269. int8_t * shifts) {
  11270. const int gindex = iq2_data_index(GGML_TYPE_IQ1_M);
  11271. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  11272. const int * kmap_q2xs = iq2_data[gindex].map;
  11273. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  11274. //GGML_ASSERT(quant_weights && "missing quantization weights");
  11275. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  11276. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  11277. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  11278. GGML_ASSERT(n%QK_K == 0);
  11279. block_iq1_m * y = vy;
  11280. const int64_t nbl = n/QK_K;
  11281. const int block_size = IQ1M_BLOCK_SIZE;
  11282. const float x_p[3] = {-1 + IQ1M_DELTA, IQ1M_DELTA, 1 + IQ1M_DELTA};
  11283. const float x_m[3] = {-1 - IQ1M_DELTA, -IQ1M_DELTA, 1 - IQ1M_DELTA};
  11284. const uint8_t masks[4] = {0x00, 0x80, 0x08, 0x88};
  11285. int * idx = (int *)(pairs + 1);
  11286. float sumqx[4], sumq2[4];
  11287. iq1m_scale_t s;
  11288. const float * xx;
  11289. for (int ibl = 0; ibl < nbl; ++ibl) {
  11290. #if QK_K == 64
  11291. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  11292. #endif
  11293. memset(y[ibl].qs, 0, QK_K/8);
  11294. memset(y[ibl].qh, 0, QK_K/16);
  11295. memset(y[ibl].scales, 0, QK_K/32);
  11296. float max_scale = 0;
  11297. const float * xbl = x + QK_K*ibl;
  11298. float sumx2 = 0;
  11299. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  11300. float sigma2 = 2*sumx2/QK_K;
  11301. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  11302. const float * xb = xbl + block_size*ib;
  11303. if (quant_weights) {
  11304. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  11305. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  11306. } else {
  11307. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  11308. }
  11309. float max = fabsf(xb[0]);
  11310. for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i]));
  11311. if (max < GROUP_MAX_EPS_IQ1_M) {
  11312. scales[ib] = 0;
  11313. memset(L, 1, block_size);
  11314. continue;
  11315. }
11316. // Here we solve exactly the weighted sum of squared differences (SSD) minimization problem.
11317. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
11318. // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
11319. // in ascending order and, for each candidate pair of boundaries, accumulate the sums
11320. // needed to quickly get the optimum scale and score for that split (see the sign
11321. // combinations enumerated below).
  11322. for (int j = 0; j < block_size; ++j) {
  11323. pairs[2*j] = xb[j];
  11324. idx[2*j] = j;
  11325. }
  11326. qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper);
  11327. float best_score = 0, scale = max;
  11328. int besti1 = -1, besti2 = -1, best_k = -1;
  11329. // 0: +, +
  11330. // 1: +, -
  11331. // 2: -, +
  11332. // 3: -, -
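// best_k picks the delta sign separately for the first and second group of 8 values:
// '+' means the x_p grid (positive IQ1M_DELTA shift), '-' the x_m grid; the chosen
// combination is recorded later by OR-ing masks[best_k] into qh[ib].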
  11333. for (int i1 = 0; i1 <= block_size; ++i1) {
  11334. for (int i2 = i1; i2 <= block_size; ++i2) {
  11335. memset(sumqx, 0, 4*sizeof(float));
  11336. memset(sumq2, 0, 4*sizeof(float));
  11337. for (int j = 0; j < i1; ++j) {
  11338. int i = idx[2*j];
  11339. if (i < block_size/2) {
  11340. sumqx[0] += weight[i]*x_p[0]*xb[i];
  11341. sumqx[1] += weight[i]*x_p[0]*xb[i];
  11342. sumqx[2] += weight[i]*x_m[0]*xb[i];
  11343. sumqx[3] += weight[i]*x_m[0]*xb[i];
  11344. sumq2[0] += weight[i]*x_p[0]*x_p[0];
  11345. sumq2[1] += weight[i]*x_p[0]*x_p[0];
  11346. sumq2[2] += weight[i]*x_m[0]*x_m[0];
  11347. sumq2[3] += weight[i]*x_m[0]*x_m[0];
  11348. } else {
  11349. sumqx[0] += weight[i]*x_p[0]*xb[i];
  11350. sumqx[2] += weight[i]*x_p[0]*xb[i];
  11351. sumqx[1] += weight[i]*x_m[0]*xb[i];
  11352. sumqx[3] += weight[i]*x_m[0]*xb[i];
  11353. sumq2[0] += weight[i]*x_p[0]*x_p[0];
  11354. sumq2[2] += weight[i]*x_p[0]*x_p[0];
  11355. sumq2[1] += weight[i]*x_m[0]*x_m[0];
  11356. sumq2[3] += weight[i]*x_m[0]*x_m[0];
  11357. }
  11358. }
  11359. for (int j = i1; j < i2; ++j) {
  11360. int i = idx[2*j];
  11361. if (i < block_size/2) {
  11362. sumqx[0] += weight[i]*x_p[1]*xb[i];
  11363. sumqx[1] += weight[i]*x_p[1]*xb[i];
  11364. sumqx[2] += weight[i]*x_m[1]*xb[i];
  11365. sumqx[3] += weight[i]*x_m[1]*xb[i];
  11366. sumq2[0] += weight[i]*x_p[1]*x_p[1];
  11367. sumq2[1] += weight[i]*x_p[1]*x_p[1];
  11368. sumq2[2] += weight[i]*x_m[1]*x_m[1];
  11369. sumq2[3] += weight[i]*x_m[1]*x_m[1];
  11370. } else {
  11371. sumqx[0] += weight[i]*x_p[1]*xb[i];
  11372. sumqx[2] += weight[i]*x_p[1]*xb[i];
  11373. sumqx[1] += weight[i]*x_m[1]*xb[i];
  11374. sumqx[3] += weight[i]*x_m[1]*xb[i];
  11375. sumq2[0] += weight[i]*x_p[1]*x_p[1];
  11376. sumq2[2] += weight[i]*x_p[1]*x_p[1];
  11377. sumq2[1] += weight[i]*x_m[1]*x_m[1];
  11378. sumq2[3] += weight[i]*x_m[1]*x_m[1];
  11379. }
  11380. }
  11381. for (int j = i2; j < block_size; ++j) {
  11382. int i = idx[2*j];
  11383. if (i < block_size/2) {
  11384. sumqx[0] += weight[i]*x_p[2]*xb[i];
  11385. sumqx[1] += weight[i]*x_p[2]*xb[i];
  11386. sumqx[2] += weight[i]*x_m[2]*xb[i];
  11387. sumqx[3] += weight[i]*x_m[2]*xb[i];
  11388. sumq2[0] += weight[i]*x_p[2]*x_p[2];
  11389. sumq2[1] += weight[i]*x_p[2]*x_p[2];
  11390. sumq2[2] += weight[i]*x_m[2]*x_m[2];
  11391. sumq2[3] += weight[i]*x_m[2]*x_m[2];
  11392. } else {
  11393. sumqx[0] += weight[i]*x_p[2]*xb[i];
  11394. sumqx[2] += weight[i]*x_p[2]*xb[i];
  11395. sumqx[1] += weight[i]*x_m[2]*xb[i];
  11396. sumqx[3] += weight[i]*x_m[2]*xb[i];
  11397. sumq2[0] += weight[i]*x_p[2]*x_p[2];
  11398. sumq2[2] += weight[i]*x_p[2]*x_p[2];
  11399. sumq2[1] += weight[i]*x_m[2]*x_m[2];
  11400. sumq2[3] += weight[i]*x_m[2]*x_m[2];
  11401. }
  11402. }
  11403. for (int k = 0; k < 4; ++k) {
  11404. if (sumq2[k] > 0 && sumqx[k]*sumqx[k] > best_score*sumq2[k]) {
  11405. scale = sumqx[k]/sumq2[k]; best_score = scale*sumqx[k];
  11406. besti1 = i1; besti2 = i2; best_k = k;
  11407. }
  11408. }
  11409. }
  11410. }
  11411. GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_k >= 0);
  11412. for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
  11413. for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
  11414. for (int j = besti2; j < block_size; ++j) L[idx[2*j]] = 2;
  11415. if (scale < 0) {
  11416. for (int j = 0; j < block_size; ++j) L[j] = 2 - L[j];
  11417. scale = -scale;
  11418. best_k = best_k == 0 ? 3 : best_k == 1 ? 2 : best_k == 2 ? 1 : 0;
  11419. }
  11420. bool all_on_grid = true;
  11421. for (int k = 0; k < block_size/8; ++k) {
  11422. if (k == 0) xx = best_k < 2 ? x_p : x_m;
  11423. else xx = best_k%2 == 0 ? x_p : x_m;
  11424. uint16_t u = 0;
  11425. for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j);
  11426. int grid_index = kmap_q2xs[u];
  11427. if (grid_index < 0) {
  11428. all_on_grid = false;
  11429. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  11430. grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S);
  11431. GGML_ASSERT(grid_index >= 0);
  11432. }
  11433. index[k] = grid_index;
  11434. }
  11435. if (!all_on_grid) {
  11436. float sumqx_f = 0, sumq2_f = 0;
  11437. for (int k = 0; k < block_size/8; ++k) {
  11438. if (k == 0) xx = best_k < 2 ? x_p : x_m;
  11439. else xx = best_k%2 == 0 ? x_p : x_m;
  11440. const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]);
  11441. for (int j = 0; j < 8; ++j) {
  11442. float w = weight[8*k + j];
  11443. float q = xx[(pg[j] - 1)/2];
  11444. sumqx_f += w*q*xb[8*k+j];
  11445. sumq2_f += w*q*q;
  11446. }
  11447. }
  11448. if (sumqx_f > 0 && sumq2_f > 0) scale = sumqx_f/sumq2_f;
  11449. }
  11450. y[ibl].qs[2*ib + 0] = index[0] & 255;
  11451. y[ibl].qs[2*ib + 1] = index[1] & 255;
  11452. y[ibl].qh[ib] = (index[0] >> 8) | ((index[1] >> 8) << 4);
  11453. GGML_ASSERT(scale >= 0);
  11454. scales[ib] = scale;
  11455. shifts[ib] = best_k;
  11456. max_scale = MAX(max_scale, scale);
  11457. }
  11458. if (!max_scale) {
  11459. continue;
  11460. }
  11461. uint16_t * sc = (uint16_t *)y[ibl].scales;
  11462. #if QK_K == 64
  11463. float d = max_scale/31;
  11464. #else
  11465. float d = max_scale/15;
  11466. #endif
  11467. float id = 1/d;
  11468. float sumqx_f = 0, sumq2_f = 0;
  11469. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  11470. int l = nearest_int(0.5f*(id*scales[ib+0]-1));
  11471. #if QK_K == 64
  11472. l = MAX(0, MIN(15, l));
  11473. sc[ib/4] |= (l << 4*(ib%4));
  11474. #else
  11475. l = MAX(0, MIN(7, l));
  11476. sc[ib/4] |= (l << 3*(ib%4));
  11477. #endif
  11478. y[ibl].qh[ib] |= masks[shifts[ib]];
  11479. const float * xb = xbl + block_size*ib;
  11480. if (quant_weights) {
  11481. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  11482. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  11483. } else {
  11484. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  11485. }
  11486. for (int k = 0; k < block_size/8; ++k) {
  11487. if (k == 0) xx = shifts[ib] < 2 ? x_p : x_m;
  11488. else xx = shifts[ib]%2 == 0 ? x_p : x_m;
  11489. const int8_t * pg = (const int8_t *)(kgrid_q2xs + y[ibl].qs[2*ib+k] + ((y[ibl].qh[ib] << (8 - 4*k)) & 0x700));
  11490. for (int j = 0; j < 8; ++j) {
  11491. float w = weight[8*k + j];
  11492. float q = xx[(pg[j] - 1)/2]*(2*l+1);
  11493. sumqx_f += w*q*xb[8*k+j];
  11494. sumq2_f += w*q*q;
  11495. }
  11496. }
  11497. }
  11498. if (sumq2_f > 0) d = sumqx_f/sumq2_f;
  11499. s.f16 = GGML_FP32_TO_FP16(d*1.1125f); // 1.1125f is another fudge factor. Don't ask me why it is needed.
  11500. #if QK_K == 64
  11501. y[ibl].d = s.f16;
  11502. #else
  11503. sc[0] |= ((s.u16 & 0x000f) << 12);
  11504. sc[1] |= ((s.u16 & 0x00f0) << 8);
  11505. sc[2] |= ((s.u16 & 0x0f00) << 4);
  11506. sc[3] |= ((s.u16 & 0xf000) << 0);
  11507. #endif
  11508. }
  11509. }
  11510. size_t quantize_iq1_m(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  11511. GGML_ASSERT(n_per_row%QK_K == 0);
  11512. float scales[QK_K/IQ1M_BLOCK_SIZE];
  11513. float weight[IQ1M_BLOCK_SIZE];
  11514. int8_t L[IQ1M_BLOCK_SIZE];
  11515. float pairs[2*IQ1M_BLOCK_SIZE];
  11516. uint16_t index[IQ1M_BLOCK_SIZE/8];
  11517. int8_t shifts[QK_K/IQ1M_BLOCK_SIZE];
  11518. int64_t nblock = n_per_row/QK_K;
  11519. char * qrow = (char *)dst;
  11520. for (int64_t row = 0; row < nrow; ++row) {
  11521. quantize_row_iq1_m_impl(src, qrow, n_per_row, quant_weights, scales, weight, pairs, L, index, shifts);
  11522. src += n_per_row;
  11523. qrow += nblock*sizeof(block_iq1_m);
  11524. }
  11525. return nrow * nblock * sizeof(block_iq1_m);
  11526. }
  11527. // ============================ 4-bit non-linear quants
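// best_index_int8: binary search over a sorted table of n int8 values (e.g. kvalues_iq4nl),
// returning the index of the entry closest to x.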
  11528. static inline int best_index_int8(int n, const int8_t * val, float x) {
  11529. if (x <= val[0]) return 0;
  11530. if (x >= val[n-1]) return n-1;
  11531. int ml = 0, mu = n-1;
  11532. while (mu-ml > 1) {
  11533. int mav = (ml+mu)/2;
  11534. if (x < val[mav]) mu = mav; else ml = mav;
  11535. }
  11536. return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
  11537. }
  11538. static void quantize_row_iq4_nl_impl(const int super_block_size, const int block_size, const float * restrict x,
  11539. ggml_fp16_t * dh, uint8_t * q4, uint16_t * scales_h, uint8_t * scales_l,
  11540. float * scales, float * weight, uint8_t * L,
  11541. const int8_t * values,
  11542. const float * quant_weights,
  11543. const int ntry) {
  11544. float sigma2 = 0;
  11545. for (int j = 0; j < super_block_size; ++j) sigma2 += x[j]*x[j];
  11546. sigma2 *= 2.f/super_block_size;
  11547. memset(q4, 0, super_block_size/2);
  11548. dh[0] = GGML_FP32_TO_FP16(0.f);
  11549. float max_scale = 0, amax_scale = 0;
  11550. for (int ib = 0; ib < super_block_size/block_size; ++ib) {
  11551. const float * xb = x + ib*block_size;
  11552. uint8_t * Lb = L + ib*block_size;
  11553. if (quant_weights) {
  11554. const float * qw = quant_weights + ib*block_size;
  11555. for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  11556. } else {
  11557. for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
  11558. }
  11559. float amax = 0, max = 0;
  11560. for (int j = 0; j < block_size; ++j) {
  11561. float ax = fabsf(xb[j]);
  11562. if (ax > amax) {
  11563. amax = ax; max = xb[j];
  11564. }
  11565. }
  11566. if (amax < GROUP_MAX_EPS) {
  11567. scales[ib] = 0;
  11568. continue;
  11569. }
  11570. float d = ntry > 0 ? -max/values[0] : max/values[0];
  11571. float id = 1/d;
  11572. float sumqx = 0, sumq2 = 0;
  11573. for (int j = 0; j < block_size; ++j) {
  11574. float al = id*xb[j];
  11575. int l = best_index_int8(16, values, al);
  11576. Lb[j] = l;
  11577. float q = values[l];
  11578. float w = weight[j];
  11579. sumqx += w*q*xb[j];
  11580. sumq2 += w*q*q;
  11581. }
  11582. d = sumqx/sumq2;
  11583. float best = d*sumqx;
  11584. for (int itry = -ntry; itry <= ntry; ++itry) {
  11585. id = (itry + values[0])/max;
  11586. sumqx = sumq2 = 0;
  11587. for (int j = 0; j < block_size; ++j) {
  11588. float al = id*xb[j];
  11589. int l = best_index_int8(16, values, al);
  11590. float q = values[l];
  11591. float w = weight[j];
  11592. sumqx += w*q*xb[j];
  11593. sumq2 += w*q*q;
  11594. }
  11595. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  11596. d = sumqx/sumq2; best = d * sumqx;
  11597. }
  11598. }
  11599. scales[ib] = d;
  11600. float abs_d = fabsf(d);
  11601. if (abs_d > amax_scale) {
  11602. amax_scale = abs_d; max_scale = d;
  11603. }
  11604. }
  11605. if (super_block_size/block_size > 1) {
  11606. int nb = super_block_size/block_size;
  11607. memset(scales_h, 0, ((nb+7)/8)*sizeof(uint16_t));
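// Each block scale is stored as a 6-bit integer relative to d = -max_scale/32: after adding
// 32 the low 4 bits go into the scales_l nibbles and the high 2 bits into scales_h.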
  11608. float d = -max_scale/32;
  11609. dh[0] = GGML_FP32_TO_FP16(d);
  11610. float id = d ? 1/d : 0.f;
  11611. for (int ib = 0; ib < super_block_size/block_size; ++ib) {
  11612. int l = nearest_int(id*scales[ib]);
  11613. l = MAX(-32, MIN(31, l));
  11614. float dl = d * l;
  11615. float idl = dl ? 1/dl : 0.f;
  11616. uint8_t * Lb = L + ib*block_size;
  11617. const float * xb = x + ib*block_size;
  11618. for (int j = 0; j < block_size; ++j) {
  11619. Lb[j] = best_index_int8(16, values, idl*xb[j]);
  11620. }
  11621. l += 32;
  11622. uint8_t l_l = l & 0xf;
  11623. uint8_t l_h = l >> 4;
  11624. if (ib%2 == 0) scales_l[ib/2] = l_l;
  11625. else scales_l[ib/2] |= (l_l << 4);
  11626. scales_h[ib/8] |= (l_h << 2*(ib%8));
  11627. }
  11628. } else {
  11629. dh[0] = GGML_FP32_TO_FP16(scales[0]);
  11630. if (ntry > 0) {
  11631. float id = scales[0] ? 1/scales[0] : 0;
  11632. for (int j = 0; j < super_block_size; ++j) {
  11633. L[j] = best_index_int8(16, values, id*x[j]);
  11634. }
  11635. }
  11636. }
  11637. for (int i = 0; i < super_block_size/32; ++i) {
  11638. for (int j = 0; j < 16; ++j) {
  11639. q4[16*i + j] = L[32*i + j] | (L[32*i + 16 + j] << 4);
  11640. }
  11641. }
  11642. }
  11643. size_t quantize_iq4_nl(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  11644. GGML_ASSERT(n_per_row%QK4_NL == 0);
  11645. int64_t nblock = n_per_row/QK4_NL;
  11646. char * qrow = (char *)dst;
  11647. uint8_t L[QK4_NL];
  11648. float weight[QK4_NL];
  11649. uint16_t unused_h;
  11650. uint8_t * unused_l = NULL;
  11651. float scale;
  11652. for (int64_t row = 0; row < nrow; ++row) {
  11653. block_iq4_nl * iq4 = (block_iq4_nl *)qrow;
  11654. for (int ibl = 0; ibl < nblock; ++ibl) {
  11655. const float * qw = quant_weights ? quant_weights + QK4_NL*ibl : NULL;
  11656. quantize_row_iq4_nl_impl(QK4_NL, 32, src + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l,
  11657. &scale, weight, L, kvalues_iq4nl, qw, 7);
  11658. }
  11659. src += n_per_row;
  11660. qrow += nblock*sizeof(block_iq4_nl);
  11661. }
  11662. return nrow * nblock * sizeof(block_iq4_nl);
  11663. }
  11664. void quantize_row_iq4_nl(const float * restrict x, void * restrict vy, int64_t k) {
  11665. GGML_ASSERT(k%QK4_NL == 0);
  11666. int64_t nblock = k/QK4_NL;
  11667. uint8_t L[QK4_NL];
  11668. float weight[QK4_NL];
  11669. uint16_t unused_h;
  11670. uint8_t * unused_l = NULL;
  11671. float scale;
  11672. block_iq4_nl * iq4 = (block_iq4_nl *)vy;
  11673. for (int ibl = 0; ibl < nblock; ++ibl) {
  11674. quantize_row_iq4_nl_impl(QK4_NL, 32, x + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l,
  11675. &scale, weight, L, kvalues_iq4nl, NULL, -1);
  11676. }
  11677. }
  11678. void quantize_row_iq4_nl_reference(const float * restrict x, block_iq4_nl * restrict y, int64_t k) {
  11679. assert(k % QK4_NL == 0);
  11680. quantize_row_iq4_nl(x, y, k);
  11681. }
  11682. size_t quantize_iq4_xs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  11683. #if QK_K == 64
  11684. return quantize_iq4_nl(src, dst, nrow, n_per_row, quant_weights);
  11685. #else
  11686. GGML_ASSERT(n_per_row%QK_K == 0);
  11687. int64_t nblock = n_per_row/QK_K;
  11688. char * qrow = (char *)dst;
  11689. uint8_t L[QK_K];
  11690. float weight[32];
  11691. float scales[QK_K/32];
  11692. for (int64_t row = 0; row < nrow; ++row) {
  11693. block_iq4_xs * iq4 = (block_iq4_xs *)qrow;
  11694. for (int ibl = 0; ibl < nblock; ++ibl) {
  11695. const float * qw = quant_weights ? quant_weights + QK_K*ibl : NULL;
  11696. quantize_row_iq4_nl_impl(QK_K, 32, src + QK_K*ibl, &iq4[ibl].d, iq4[ibl].qs, &iq4[ibl].scales_h, iq4[ibl].scales_l,
  11697. scales, weight, L, kvalues_iq4nl, qw, 7);
  11698. }
  11699. src += n_per_row;
  11700. qrow += nblock*sizeof(block_iq4_xs);
  11701. }
  11702. return nrow * nblock * sizeof(block_iq4_xs);
  11703. #endif
  11704. }
  11705. void quantize_row_iq4_xs(const float * restrict x, void * restrict vy, int64_t k) {
  11706. assert(k % QK_K == 0);
  11707. block_iq4_xs * restrict y = vy;
  11708. quantize_row_iq4_xs_reference(x, y, k);
  11709. }
  11710. void quantize_row_iq4_xs_reference(const float * restrict x, block_iq4_xs * restrict y, int64_t k) {
  11711. assert(k % QK_K == 0);
  11712. quantize_iq4_xs(x, y, 1, k, NULL);
  11713. }
  11714. // =============================== 2.5625 bpw
  11715. static void quantize_row_iq2_s_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights) {
  11716. const int gindex = iq2_data_index(GGML_TYPE_IQ2_S);
  11717. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  11718. const int * kmap_q2xs = iq2_data[gindex].map;
  11719. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  11720. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  11721. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  11722. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  11723. GGML_ASSERT(n%QK_K == 0);
  11724. const int kMaxQ = 3;
  11725. const int64_t nbl = n/QK_K;
  11726. block_iq2_s * y = vy;
  11727. float scales[QK_K/16];
  11728. float weight[16];
  11729. float xval[16];
  11730. int8_t L[16];
  11731. int8_t Laux[16];
  11732. float waux[16];
  11733. bool is_on_grid[2];
  11734. bool is_on_grid_aux[2];
  11735. uint8_t block_signs[2];
  11736. for (int ibl = 0; ibl < nbl; ++ibl) {
  11737. memset(&y[ibl], 0, sizeof(block_iq2_s));
  11738. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  11739. float max_scale = 0;
  11740. const float * xbl = x + QK_K*ibl;
  11741. float sumx2 = 0;
  11742. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  11743. float sigma2 = 2*sumx2/QK_K;
  11744. for (int ib = 0; ib < QK_K/16; ++ib) {
  11745. const float * xb = xbl + 16*ib;
  11746. if (quant_weights) {
  11747. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  11748. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  11749. } else {
  11750. for (int i = 0; i < 16; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i];
  11751. }
  11752. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  11753. for (int k = 0; k < 2; ++k) {
  11754. uint8_t s = 0;
  11755. for (int i = 0; i < 8; ++i) {
  11756. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  11757. else {
  11758. xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
  11759. }
  11760. }
  11761. block_signs[k] = s;
  11762. }
  11763. float max = xval[0];
  11764. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  11765. if (max < GROUP_MAX_EPS_IQ2_S) {
  11766. scales[ib] = 0;
  11767. continue;
  11768. }
  11769. float best = 0;
  11770. float scale = max/(2*kMaxQ-1);
  11771. is_on_grid[0] = is_on_grid[1] = true;
  11772. for (int is = -9; is <= 9; ++is) {
  11773. float id = (2*kMaxQ-1+is*0.1f)/max;
  11774. float this_scale = 1/id;
  11775. for (int k = 0; k < 2; ++k) {
  11776. for (int i = 0; i < 8; ++i) {
  11777. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  11778. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  11779. }
  11780. uint16_t u = 0;
  11781. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  11782. int grid_index = kmap_q2xs[u];
  11783. is_on_grid_aux[k] = true;
  11784. if (grid_index < 0) {
  11785. is_on_grid_aux[k] = false;
  11786. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  11787. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  11788. }
  11789. }
  11790. float sumqx = 0, sumq2 = 0;
  11791. for (int i = 0; i < 16; ++i) {
  11792. float w = weight[i];
  11793. float q = 2*Laux[i] + 1;
  11794. sumqx += w*xval[i]*q;
  11795. sumq2 += w*q*q;
  11796. }
  11797. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  11798. scale = sumqx/sumq2; best = scale*sumqx;
  11799. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  11800. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  11801. }
  11802. }
  11803. int n_not_ongrid = 0;
  11804. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  11805. if (n_not_ongrid > 0 && scale > 0) {
  11806. float id = 1/scale;
  11807. for (int k = 0; k < 2; ++k) {
  11808. if (is_on_grid[k]) continue;
  11809. uint16_t u = 0;
  11810. for (int i = 0; i < 8; ++i) {
  11811. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  11812. l = MAX(0, MIN(kMaxQ-1, l));
  11813. u |= (l << 2*i);
  11814. L[8*k + i] = l;
  11815. }
  11816. int grid_index = kmap_q2xs[u];
  11817. if (grid_index < 0) {
  11818. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  11819. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  11820. }
  11821. }
  11822. float sumqx = 0, sumq2 = 0;
  11823. for (int i = 0; i < 16; ++i) {
  11824. float w = weight[i];
  11825. float q = 2*L[i] + 1;
  11826. sumqx += w*xval[i]*q;
  11827. sumq2 += w*q*q;
  11828. }
  11829. if (sumq2 > 0) scale = sumqx/sumq2;
  11830. }
  11831. if (scale < 0) {
  11832. scale = -scale;
  11833. for (int k = 0; k < 2; ++k) block_signs[k] = ~block_signs[k];
  11834. }
  11835. for (int k = 0; k < 2; ++k) {
  11836. uint16_t u = 0;
  11837. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  11838. int grid_index = kmap_q2xs[u];
  11839. if (grid_index < 0) {
  11840. printf("Oops: found point %u not on grid:", u);
  11841. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  11842. printf("\n");
  11843. GGML_ASSERT(false);
  11844. }
  11845. const int i8 = 2*ib + k;
  11846. y[ibl].qs[i8] = grid_index & 255;
  11847. y[ibl].qh[i8/4] |= ((grid_index >> 8) << 2*(i8%4));
  11848. y[ibl].qs[QK_K/8 + i8] = block_signs[k];
  11849. }
  11850. GGML_ASSERT(scale >= 0);
  11851. scales[ib] = scale;
  11852. max_scale = MAX(max_scale, scale);
  11853. }
  11854. if (!max_scale) {
  11855. continue;
  11856. }
  11857. float d = max_scale/31;
  11858. y[ibl].d = GGML_FP32_TO_FP16(d * 0.9875f);
  11859. float id = 1/d;
  11860. for (int ib = 0; ib < QK_K/16; ++ib) {
  11861. int l = nearest_int(0.5f*(id*scales[ib]-1));
  11862. l = MAX(0, MIN(15, l));
  11863. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  11864. else y[ibl].scales[ib/2] |= (l << 4);
  11865. }
  11866. }
  11867. }
  11868. size_t quantize_iq2_s(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  11869. GGML_ASSERT(n_per_row%QK_K == 0);
  11870. int64_t nblock = n_per_row/QK_K;
  11871. char * qrow = (char *)dst;
  11872. for (int64_t row = 0; row < nrow; ++row) {
  11873. quantize_row_iq2_s_impl(src, qrow, n_per_row, quant_weights);
  11874. src += n_per_row;
  11875. qrow += nblock*sizeof(block_iq2_s);
  11876. }
  11877. return nrow * nblock * sizeof(block_iq2_s);
  11878. }
  11879. void quantize_row_iq2_s_reference(const float * restrict x, block_iq2_s * restrict y, int64_t k) {
  11880. assert(k % QK_K == 0);
  11881. quantize_iq2_s(x, y, 1, k, NULL);
  11882. }
  11883. void quantize_row_iq2_s(const float * restrict x, void * restrict vy, int64_t k) {
  11884. assert(k % QK_K == 0);
  11885. block_iq2_s * restrict y = vy;
  11886. quantize_row_iq2_s_reference(x, y, k);
  11887. }
  11888. static bool validate_float(float f, size_t i) {
  11889. if (isinf(f)) {
  11890. fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
  11891. return false;
  11892. }
  11893. if (isnan(f)) {
  11894. fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
  11895. return false;
  11896. }
  11897. return true;
  11898. }
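// IEEE fp16: a value is Inf when all exponent bits (0x7c00) are set and the mantissa (0x03ff)
// is zero, and NaN when the exponent is all ones and the mantissa is non-zero.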
  11899. static bool isinf_fp16(ggml_fp16_t f) {
  11900. return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0;
  11901. }
  11902. static bool isnan_fp16(ggml_fp16_t f) {
  11903. return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0;
  11904. }
  11905. static bool validate_fp16(ggml_fp16_t f, size_t i) {
  11906. if (isinf_fp16(f)) {
  11907. fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
  11908. return false;
  11909. }
  11910. if (isnan_fp16(f)) {
  11911. fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
  11912. return false;
  11913. }
  11914. return true;
  11915. }
  11916. #define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \
  11917. const type * q = (const type *) (data); \
  11918. for (size_t i = 0; i < (nb); ++i) { \
  11919. if (!validate_fp16(q[i].d, i)) { \
  11920. return false; \
  11921. } \
  11922. }
  11923. #define VALIDATE_ROW_DATA_DM_F16_IMPL(type, data, nb, d, m) \
  11924. const type * q = (const type *) (data); \
  11925. for (size_t i = 0; i < (nb); ++i) { \
  11926. if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].m, i)) { \
  11927. return false; \
  11928. } \
  11929. }
  11930. bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
  11931. if (type < 0 || type >= GGML_TYPE_COUNT) {
  11932. fprintf(stderr, "%s: invalid type %d\n", __func__, type);
  11933. return false;
  11934. }
  11935. if (nbytes % ggml_type_size(type) != 0) {
  11936. fprintf(stderr, "%s: invalid size %zu for type %d\n", __func__, nbytes, type);
  11937. return false;
  11938. }
  11939. const size_t nb = nbytes/ggml_type_size(type);
  11940. switch (type) {
  11941. case GGML_TYPE_BF16:
  11942. {
  11943. int nans = 0;
  11944. int infs = 0;
  11945. const unsigned short * f = (const unsigned short *) data;
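// BF16 is the upper half of an IEEE fp32, so 0x7f80 is the all-ones exponent with a zero
// mantissa (Inf) and any larger value (sign bit masked off) is a NaN.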
  11946. for (size_t i = 0; i < nb; ++i) {
  11947. nans += (f[i] & 0x7fff) > 0x7f80;
  11948. infs += (f[i] & 0x7fff) == 0x7f80;
  11949. }
  11950. if (nans) {
  11951. fprintf(stderr, "%s: found %d NaNs in row of %zu BF16 values\n", __func__, nans, nb);
  11952. return false;
  11953. }
  11954. if (infs) {
  11955. fprintf(stderr, "%s: found %d infinities in row of %zu BF16 values\n", __func__, infs, nb);
  11956. return false;
  11957. }
  11958. } break;
  11959. case GGML_TYPE_F16:
  11960. {
  11961. const ggml_fp16_t * f = (const ggml_fp16_t *) data;
  11962. size_t i = 0;
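// Fast path: AVX2/NEON check several fp16 lanes at once for an all-ones exponent (0x7c00);
// only when a lane matches does the scalar loop re-check to report the offending index and
// return false, so GGML_UNREACHABLE() marks the fall-through as impossible.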
  11963. #if defined(__AVX2__)
  11964. for (; i + 15 < nb; i += 16) {
  11965. __m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
  11966. __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi16(0x7c00));
  11967. __m256i cmp = _mm256_cmpeq_epi16(vexp, _mm256_set1_epi16(0x7c00));
  11968. int mask = _mm256_movemask_epi8(cmp);
  11969. if (mask) {
  11970. for (size_t j = 0; j < 16; ++j) {
  11971. if (!validate_fp16(f[i + j], i + j)) {
  11972. return false;
  11973. }
  11974. }
  11975. GGML_UNREACHABLE();
  11976. }
  11977. }
  11978. #elif defined(__ARM_NEON)
  11979. for (; i + 7 < nb; i += 8) {
  11980. uint16x8_t v = vld1q_u16(f + i);
  11981. uint16x8_t vexp = vandq_u16(v, vdupq_n_u16(0x7c00));
  11982. uint16x8_t cmp = vceqq_u16(vexp, vdupq_n_u16(0x7c00));
  11983. uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(cmp, 4)), 0);
  11984. if (mask) {
  11985. for (size_t j = 0; j < 8; ++j) {
  11986. if (!validate_fp16(f[i + j], i + j)) {
  11987. return false;
  11988. }
  11989. }
  11990. GGML_UNREACHABLE();
  11991. }
  11992. }
  11993. #endif
  11994. for (; i < nb; ++i) {
  11995. if (!validate_fp16(f[i], i)) {
  11996. return false;
  11997. }
  11998. }
  11999. } break;
  12000. case GGML_TYPE_F32:
  12001. {
  12002. const float * f = (const float *) data;
  12003. size_t i = 0;
  12004. #if defined(__AVX2__)
  12005. for (; i + 7 < nb; i += 8) {
  12006. __m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
  12007. __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi32(0x7f800000));
  12008. __m256i cmp = _mm256_cmpeq_epi32(vexp, _mm256_set1_epi32(0x7f800000));
  12009. int mask = _mm256_movemask_epi8(cmp);
  12010. if (mask) {
  12011. for (size_t j = 0; j < 8; ++j) {
  12012. if (!validate_float(f[i + j], i + j)) {
  12013. return false;
  12014. }
  12015. }
  12016. GGML_UNREACHABLE();
  12017. }
  12018. }
  12019. #elif defined(__ARM_NEON)
  12020. for (; i + 3 < nb; i += 4) {
  12021. uint32x4_t v = vld1q_u32((const uint32_t *)f + i);
  12022. uint32x4_t vexp = vandq_u32(v, vdupq_n_u32(0x7f800000));
  12023. uint32x4_t cmp = vceqq_u32(vexp, vdupq_n_u32(0x7f800000));
  12024. uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(cmp, 8)), 0);
  12025. if (mask) {
  12026. for (size_t j = 0; j < 4; ++j) {
  12027. if (!validate_float(f[i + j], i + j)) {
  12028. return false;
  12029. }
  12030. }
  12031. GGML_UNREACHABLE();
  12032. }
  12033. }
  12034. #endif
  12035. for (; i < nb; ++i) {
  12036. if (!validate_float(f[i], i)) {
  12037. return false;
  12038. }
  12039. }
  12040. } break;
  12041. case GGML_TYPE_F64:
  12042. {
  12043. const double * f = (const double *) data;
  12044. for (size_t i = 0; i < nb; ++i) {
  12045. if (!validate_float(f[i], i)) {
  12046. return false;
  12047. }
  12048. }
  12049. } break;
  12050. case GGML_TYPE_Q4_0:
  12051. {
  12052. VALIDATE_ROW_DATA_D_F16_IMPL(block_q4_0, data, nb);
  12053. } break;
  12054. case GGML_TYPE_Q4_1:
  12055. {
  12056. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_1, data, nb, d, m);
  12057. } break;
  12058. case GGML_TYPE_Q5_0:
  12059. {
  12060. VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_0, data, nb);
  12061. } break;
  12062. case GGML_TYPE_Q5_1:
  12063. {
  12064. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_1, data, nb, d, m);
  12065. } break;
  12066. case GGML_TYPE_Q8_0:
  12067. {
  12068. VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb);
  12069. } break;
  12070. case GGML_TYPE_Q2_K:
  12071. {
  12072. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin);
  12073. } break;
  12074. case GGML_TYPE_Q3_K:
  12075. {
  12076. VALIDATE_ROW_DATA_D_F16_IMPL(block_q3_K, data, nb);
  12077. } break;
  12078. case GGML_TYPE_Q4_K:
  12079. {
  12080. #ifdef GGML_QKK_64
  12081. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d[0], d[1]);
  12082. #else
  12083. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d, dmin);
  12084. #endif
  12085. } break;
  12086. case GGML_TYPE_Q5_K:
  12087. {
  12088. #ifdef GGML_QKK_64
  12089. VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_K, data, nb);
  12090. #else
  12091. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_K, data, nb, d, dmin);
  12092. #endif
  12093. } break;
  12094. case GGML_TYPE_Q6_K:
  12095. {
  12096. VALIDATE_ROW_DATA_D_F16_IMPL(block_q6_K, data, nb);
  12097. } break;
  12098. case GGML_TYPE_Q8_K:
  12099. {
  12100. const block_q8_K * q = (const block_q8_K *) data;
  12101. for (size_t i = 0; i < nb; ++i) {
  12102. if (!validate_float(q[i].d, i)) {
  12103. return false;
  12104. }
  12105. }
  12106. } break;
  12107. case GGML_TYPE_IQ1_S:
  12108. {
  12109. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq1_s, data, nb);
  12110. } break;
  12111. case GGML_TYPE_IQ1_M:
  12112. {
  12113. const block_iq1_m * q = (const block_iq1_m *) data;
  12114. for (size_t i = 0; i < nb; ++i) {
  12115. #if QK_K == 64
  12116. if (!validate_fp16(q[i].d, i)) {
  12117. return false;
  12118. }
  12119. #else
  12120. iq1m_scale_t scale;
  12121. const uint16_t * sc = (const uint16_t *)q[i].scales;
  12122. scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  12123. if (!validate_fp16(scale.f16, i)) {
  12124. return false;
  12125. }
  12126. #endif
  12127. }
  12128. } break;
  12129. case GGML_TYPE_IQ2_XXS:
  12130. {
  12131. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xxs, data, nb);
  12132. } break;
  12133. case GGML_TYPE_IQ2_XS:
  12134. {
  12135. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xs, data, nb);
  12136. } break;
  12137. case GGML_TYPE_IQ2_S:
  12138. {
  12139. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_s, data, nb);
  12140. } break;
  12141. case GGML_TYPE_IQ3_XXS:
  12142. {
  12143. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_xxs, data, nb);
  12144. } break;
  12145. case GGML_TYPE_IQ3_S:
  12146. {
  12147. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_s, data, nb);
  12148. } break;
  12149. case GGML_TYPE_IQ4_XS:
  12150. #if QK_K != 64
  12151. {
  12152. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_xs, data, nb);
  12153. } break;
  12154. #endif
  12155. // with QK_K == 64, iq4_xs is iq4_nl
  12156. case GGML_TYPE_IQ4_NL:
  12157. {
  12158. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb);
  12159. } break;
  12160. case GGML_TYPE_I8:
  12161. case GGML_TYPE_I16:
  12162. case GGML_TYPE_I32:
  12163. case GGML_TYPE_I64:
  12164. // nothing to validate
  12165. break;
  12166. default:
  12167. {
  12168. fprintf(stderr, "%s: invalid type %d\n", __func__, type);
  12169. return false;
  12170. }
  12171. }
  12172. return true;
  12173. }