- #include "k_quants.h"
- #include "ggml.h"
- #include <math.h>
- #include <string.h>
- #include <assert.h>
- #ifdef __ARM_NEON
- // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
- //
- // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
- //
- #include <arm_neon.h>
- #if !defined(__aarch64__)
- inline static int32_t vaddvq_s16(int16x8_t v) {
- return
- (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
- (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
- (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
- (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
- }
- inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
- int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
- int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
- return vcombine_s16(a0, b0);
- }
- inline static int32_t vaddvq_s32(int32x4_t v) {
- return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
- }
- #endif
- #else
- #ifdef __wasm_simd128__
- #include <wasm_simd128.h>
- #else
- #ifdef __POWER9_VECTOR__
- #include <altivec.h>
- #undef bool
- #define bool _Bool
- #else
- #if defined(_MSC_VER) || defined(__MINGW32__)
- #include <intrin.h>
- #else
- #if !defined(__riscv)
- #include <immintrin.h>
- #endif
- #endif
- #endif
- #endif
- #endif
- #undef MIN
- #undef MAX
- #define MIN(a, b) ((a) < (b) ? (a) : (b))
- #define MAX(a, b) ((a) > (b) ? (a) : (b))
- #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
- //
- // 2-6 bit quantization in super-blocks
- //
- //
- // ===================== Helper functions
- //
- static inline int nearest_int(float fval) {
- assert(fval <= 4194303.f);
- float val = fval + 12582912.f;
- int i; memcpy(&i, &val, sizeof(int));
- return (i & 0x007fffff) - 0x00400000;
- }
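- // Illustrative self-check (a sketch, not part of the original file): the
- // magic add of 12582912.0f == 1.5 * 2^23 pins the float's exponent so the
- // rounded integer lands in the low 23 mantissa bits, biased by 2^22
- // (0x00400000). This assumes IEEE-754 binary32 floats and the default
- // round-to-nearest-even mode, hence the half-to-even ties below.
- static void nearest_int_self_check(void) {
-     assert(nearest_int( 0.4f)   ==    0);
-     assert(nearest_int(-0.4f)   ==    0);
-     assert(nearest_int( 2.5f)   ==    2); // tie rounds to even
-     assert(nearest_int(-2.5f)   ==   -2);
-     assert(nearest_int(1000.7f) == 1001);
- }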
- static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type) {
- float max = 0;
- float amax = 0;
- for (int i = 0; i < n; ++i) {
- float ax = fabsf(x[i]);
- if (ax > amax) { amax = ax; max = x[i]; }
- }
- if (!amax) { // all zero
- for (int i = 0; i < n; ++i) {
- L[i] = 0;
- }
- return 0.f;
- }
- float iscale = -nmax / max;
- if (rmse_type == 0) {
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
- }
- return 1/iscale;
- }
- bool return_early = false;
- if (rmse_type < 0) {
- rmse_type = -rmse_type;
- return_early = true;
- }
- int weight_type = rmse_type%2;
- float sumlx = 0;
- float suml2 = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- L[i] = l + nmax;
- float w = weight_type == 1 ? x[i] * x[i] : 1;
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- float scale = sumlx/suml2;
- if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
- float best = scale * sumlx;
- for (int is = -9; is <= 9; ++is) {
- if (is == 0) {
- continue;
- }
- iscale = -(nmax + 0.1f*is) / max;
- sumlx = suml2 = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- float w = weight_type == 1 ? x[i] * x[i] : 1;
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- if (suml2 > 0 && sumlx*sumlx > best*suml2) {
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
- }
- scale = sumlx/suml2; best = scale*sumlx;
- }
- }
- return scale;
- }
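- // Hedged usage sketch (hypothetical helper, not in the original file):
- // quantize a 16-value sub-block to 5-bit levels the way the QK_K == 64
- // q5_K path further below does. L[i] comes back biased by nmax, so the
- // reconstruction is x[i] ~ scale * (L[i] - 16).
- static float quantize_16_example(const float * restrict x, int8_t * restrict L) {
-     // nmax = 16 -> levels in [-16, 15]; rmse_type 1 weights errors by x^2
-     return make_qx_quants(16, 16, x, L, 1);
- }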
- static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
- float max = 0;
- float amax = 0;
- for (int i = 0; i < n; ++i) {
- float ax = fabsf(x[i]);
- if (ax > amax) { amax = ax; max = x[i]; }
- }
- if (!amax) { // all zero
- for (int i = 0; i < n; ++i) { L[i] = 0; }
- return 0.f;
- }
- float iscale = -nmax / max;
- if (do_rmse) {
- float sumlx = 0;
- float suml2 = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- L[i] = l;
- float w = x[i]*x[i];
- sumlx += w*x[i]*l;
- suml2 += w*l*l;
- }
- for (int itry = 0; itry < 5; ++itry) {
- int n_changed = 0;
- for (int i = 0; i < n; ++i) {
- float w = x[i]*x[i];
- float slx = sumlx - w*x[i]*L[i];
- if (slx > 0) {
- float sl2 = suml2 - w*L[i]*L[i];
- int new_l = nearest_int(x[i] * sl2 / slx);
- new_l = MAX(-nmax, MIN(nmax-1, new_l));
- if (new_l != L[i]) {
- slx += w*x[i]*new_l;
- sl2 += w*new_l*new_l;
- if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
- L[i] = new_l; sumlx = slx; suml2 = sl2;
- ++n_changed;
- }
- }
- }
- }
- if (!n_changed) {
- break;
- }
- }
- for (int i = 0; i < n; ++i) {
- L[i] += nmax;
- }
- return sumlx / suml2;
- }
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale * x[i]);
- l = MAX(-nmax, MIN(nmax-1, l));
- L[i] = l + nmax;
- }
- return 1/iscale;
- }
- static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
- int ntry, float alpha) {
- float min = x[0];
- float max = x[0];
- for (int i = 1; i < n; ++i) {
- if (x[i] < min) min = x[i];
- if (x[i] > max) max = x[i];
- }
- if (max == min) {
- for (int i = 0; i < n; ++i) L[i] = 0;
- *the_min = 0;
- return 0.f;
- }
- if (min > 0) min = 0;
- float iscale = nmax/(max - min);
- float scale = 1/iscale;
- for (int itry = 0; itry < ntry; ++itry) {
- float sumlx = 0; int suml2 = 0;
- bool did_change = false;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- l = MAX(0, MIN(nmax, l));
- if (l != L[i]) {
- L[i] = l;
- did_change = true;
- }
- sumlx += (x[i] - min)*l;
- suml2 += l*l;
- }
- scale = sumlx/suml2;
- float sum = 0;
- for (int i = 0; i < n; ++i) {
- sum += x[i] - scale*L[i];
- }
- min = alpha*min + (1 - alpha)*sum/n;
- if (min > 0) min = 0;
- iscale = 1/scale;
- if (!did_change) break;
- }
- *the_min = -min;
- return scale;
- }
- static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
- uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
- float rmin, float rdelta, int nstep, bool use_mad) {
- float min = x[0];
- float max = x[0];
- float sum_w = weights[0];
- float sum_x = sum_w * x[0];
- for (int i = 1; i < n; ++i) {
- if (x[i] < min) min = x[i];
- if (x[i] > max) max = x[i];
- float w = weights[i];
- sum_w += w;
- sum_x += w * x[i];
- }
- if (min > 0) min = 0;
- if (max == min) {
- for (int i = 0; i < n; ++i) L[i] = 0;
- *the_min = -min;
- return 0.f;
- }
- float iscale = nmax/(max - min);
- float scale = 1/iscale;
- float best_mad = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- L[i] = MAX(0, MIN(nmax, l));
- float diff = scale * L[i] + min - x[i];
- diff = use_mad ? fabsf(diff) : diff * diff;
- float w = weights[i];
- best_mad += w * diff;
- }
- if (nstep < 1) {
- *the_min = -min;
- return scale;
- }
- for (int is = 0; is <= nstep; ++is) {
- iscale = (rmin + rdelta*is + nmax)/(max - min);
- float sum_l = 0, sum_l2 = 0, sum_xl = 0;
- for (int i = 0; i < n; ++i) {
- int l = nearest_int(iscale*(x[i] - min));
- l = MAX(0, MIN(nmax, l));
- Laux[i] = l;
- float w = weights[i];
- sum_l += w*l;
- sum_l2 += w*l*l;
- sum_xl += w*l*x[i];
- }
- float D = sum_w * sum_l2 - sum_l * sum_l;
- if (D > 0) {
- float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
- float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
- if (this_min > 0) {
- this_min = 0;
- this_scale = sum_xl / sum_l2;
- }
- float mad = 0;
- for (int i = 0; i < n; ++i) {
- float diff = this_scale * Laux[i] + this_min - x[i];
- diff = use_mad ? fabsf(diff) : diff * diff;
- float w = weights[i];
- mad += w * diff;
- }
- if (mad < best_mad) {
- for (int i = 0; i < n; ++i) {
- L[i] = Laux[i];
- }
- best_mad = mad;
- scale = this_scale;
- min = this_min;
- }
- }
- }
- *the_min = -min;
- return scale;
- }
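- // For reference: the closed-form update inside the loop above solves the
- // weighted least-squares problem  min_{a,b} sum_i w_i*(a*l_i + b - x_i)^2
- // via the 2x2 normal equations. With D = sum_w*sum_l2 - sum_l*sum_l,
- //   a = (sum_w*sum_xl - sum_x*sum_l) / D   (this_scale)
- //   b = (sum_l2*sum_x - sum_l*sum_xl) / D  (this_min)
- // and b is clamped to <= 0 so that the stored minimum (-b) stays non-negative.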
- #if QK_K == 256
- static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
- if (j < 4) {
- *d = q[j] & 63; *m = q[j + 4] & 63;
- } else {
- *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
- *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
- }
- }
- #endif
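- // Packing used by get_scale_min_k4 (QK_K == 256): 8 six-bit scales and
- // 8 six-bit mins fit into 12 bytes. Bytes 0..3 hold the low 6 bits of
- // scales 0..3 and bytes 4..7 the low 6 bits of mins 0..3. For j = 4..7,
- // byte j+4 packs the low 4 bits of scale j (low nibble) and the low 4
- // bits of min j (high nibble); the remaining two high bits of scale j
- // and min j sit in the top two bits of bytes j-4 and j respectively.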
- //========================= 2-bit (de)-quantization
- void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- uint8_t L[QK_K];
- uint8_t Laux[16];
- float weights[16];
- float mins[QK_K/16];
- float scales[QK_K/16];
- const float q4scale = 15.f;
- for (int i = 0; i < nb; i++) {
- float max_scale = 0; // as we are deducting the min, scales are always positive
- float max_min = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
- scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
- float scale = scales[j];
- if (scale > max_scale) {
- max_scale = scale;
- }
- float min = mins[j];
- if (min > max_min) {
- max_min = min;
- }
- }
- if (max_scale > 0) {
- float iscale = q4scale/max_scale;
- for (int j = 0; j < QK_K/16; ++j) {
- int l = nearest_int(iscale*scales[j]);
- y[i].scales[j] = l;
- }
- y[i].d = ggml_fp32_to_fp16(max_scale/q4scale);
- } else {
- for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
- y[i].d = ggml_fp32_to_fp16(0.f);
- }
- if (max_min > 0) {
- float iscale = q4scale/max_min;
- for (int j = 0; j < QK_K/16; ++j) {
- int l = nearest_int(iscale*mins[j]);
- y[i].scales[j] |= (l << 4);
- }
- y[i].dmin = ggml_fp32_to_fp16(max_min/q4scale);
- } else {
- y[i].dmin = ggml_fp32_to_fp16(0.f);
- }
- for (int j = 0; j < QK_K/16; ++j) {
- const float d = ggml_fp16_to_fp32(y[i].d) * (y[i].scales[j] & 0xF);
- if (!d) continue;
- const float dm = ggml_fp16_to_fp32(y[i].dmin) * (y[i].scales[j] >> 4);
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int((x[16*j + ii] + dm)/d);
- l = MAX(0, MIN(3, l));
- L[16*j + ii] = l;
- }
- }
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
- }
- }
- #else
- for (int l = 0; l < 16; ++l) {
- y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
- }
- #endif
- x += QK_K;
- }
- }
- void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const float d = ggml_fp16_to_fp32(x[i].d);
- const float min = ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * q = x[i].qs;
- #if QK_K == 256
- int is = 0;
- float dl, ml;
- for (int n = 0; n < QK_K; n += 128) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- uint8_t sc = x[i].scales[is++];
- dl = d * (sc & 0xF); ml = min * (sc >> 4);
- for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
- sc = x[i].scales[is++];
- dl = d * (sc & 0xF); ml = min * (sc >> 4);
- for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
- shift += 2;
- }
- q += 32;
- }
- #else
- float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
- float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
- float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
- float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
- for (int l = 0; l < 16; ++l) {
- y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
- y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
- y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
- y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
- }
- y += QK_K;
- #endif
- }
- }
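- // Hedged round-trip sketch (hypothetical helper, not in the original
- // file): per 16-value sub-block the reconstruction is
- //   x[i] ~ d*(scales[j] & 0xF)*q - dmin*(scales[j] >> 4),  q in 0..3.
- static void q2_K_roundtrip_example(const float * restrict src, float * restrict dst) {
-     block_q2_K blk; // one super-block of QK_K values
-     quantize_row_q2_K_reference(src, &blk, QK_K);
-     dequantize_row_q2_K(&blk, dst, QK_K);
- }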
- void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
- quantize_row_q2_K_reference(x, vy, k);
- }
- size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- const int nb = k / QK_K;
- // TODO - collect histograms - although, on second thought, I don't really care about them
- (void)hist;
- for (int j = 0; j < nb; j += k) {
- block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
- quantize_row_q2_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q2_K));
- }
- //========================= 3-bit (de)-quantization
- void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- int8_t L[QK_K];
- float scales[QK_K / 16];
- for (int i = 0; i < nb; i++) {
- float max_scale = 0;
- float amax = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
- float scale = fabsf(scales[j]);
- if (scale > amax) {
- amax = scale; max_scale = scales[j];
- }
- }
- #if QK_K == 256
- memset(y[i].scales, 0, 12);
- if (max_scale) {
- float iscale = -32.f/max_scale;
- for (int j = 0; j < QK_K/16; ++j) {
- int8_t l = nearest_int(iscale*scales[j]);
- l = MAX(-32, MIN(31, l)) + 32;
- if (j < 8) {
- y[i].scales[j] = l & 0xF;
- } else {
- y[i].scales[j-8] |= ((l & 0xF) << 4);
- }
- l >>= 4;
- y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
- }
- y[i].d = ggml_fp32_to_fp16(1/iscale);
- } else {
- y[i].d = ggml_fp32_to_fp16(0.f);
- }
- int8_t sc;
- for (int j = 0; j < QK_K/16; ++j) {
- sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
- sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
- float d = ggml_fp16_to_fp32(y[i].d) * sc;
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-4, MIN(3, l));
- L[16*j + ii] = l + 4;
- }
- }
- #else
- if (max_scale) {
- float iscale = -8.f/max_scale;
- for (int j = 0; j < QK_K/16; j+=2) {
- int l1 = nearest_int(iscale*scales[j]);
- l1 = 8 + MAX(-8, MIN(7, l1));
- int l2 = nearest_int(iscale*scales[j+1]);
- l2 = 8 + MAX(-8, MIN(7, l2));
- y[i].scales[j/2] = l1 | (l2 << 4);
- }
- y[i].d = ggml_fp32_to_fp16(1/iscale);
- } else {
- for (int j = 0; j < QK_K/16; j+=2) {
- y[i].scales[j/2] = 0;
- }
- y[i].d = ggml_fp32_to_fp16(0.f);
- }
- for (int j = 0; j < QK_K/16; ++j) {
- int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
- float d = ggml_fp16_to_fp32(y[i].d) * (s - 8);
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-4, MIN(3, l));
- L[16*j + ii] = l + 4;
- }
- }
- #endif
- memset(y[i].hmask, 0, QK_K/8);
- // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc.
- int m = 0;
- uint8_t hm = 1;
- for (int j = 0; j < QK_K; ++j) {
- if (L[j] > 3) {
- y[i].hmask[m] |= hm;
- L[j] -= 4;
- }
- if (++m == QK_K/8) {
- m = 0; hm <<= 1;
- }
- }
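- // With QK_K == 256 this stores the high bit of quant j in
- // hmask[j % 32], bit j / 32: hmask[0] bit 0 belongs to quant 0,
- // hmask[31] bit 7 to quant 255.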
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
- }
- }
- #else
- for (int l = 0; l < 16; ++l) {
- y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
- }
- #endif
- x += QK_K;
- }
- }
- #if QK_K == 256
- void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- const uint32_t kmask1 = 0x03030303;
- const uint32_t kmask2 = 0x0f0f0f0f;
- uint32_t aux[4];
- const int8_t * scales = (const int8_t*)aux;
- for (int i = 0; i < nb; i++) {
- const float d_all = ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- uint8_t m = 1;
- memcpy(aux, x[i].scales, 12);
- uint32_t tmp = aux[2];
- aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- int is = 0;
- float dl;
- for (int n = 0; n < QK_K; n += 128) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- dl = d_all * (scales[is++] - 32);
- for (int l = 0; l < 16; ++l) {
- *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
- }
- dl = d_all * (scales[is++] - 32);
- for (int l = 0; l < 16; ++l) {
- *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
- }
- shift += 2;
- m <<= 1;
- }
- q += 32;
- }
- }
- }
- #else
- void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- assert(QK_K == 64);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const float d_all = ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
- const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
- const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
- const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
- for (int l=0; l<8; ++l) {
- uint8_t h = hm[l];
- y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
- y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
- y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
- y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
- y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
- y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
- y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
- y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
- }
- y += QK_K;
- }
- }
- #endif
- void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
- quantize_row_q3_K_reference(x, vy, k);
- }
- size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- const int nb = k / QK_K;
- // TODO - collect histograms - although, on second thought, I don't really care about them
- (void)hist;
- for (int j = 0; j < nb; j += k) {
- block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
- quantize_row_q3_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q3_K));
- }
- // ====================== 4-bit (de)-quantization
- void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- uint8_t L[QK_K];
- uint8_t Laux[32];
- float weights[32];
- float mins[QK_K/32];
- float scales[QK_K/32];
- for (int i = 0; i < nb; i++) {
- float max_scale = 0; // as we are deducting the min, scales are always positive
- float max_min = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
- float sum_x2 = 0;
- for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
- float av_x = sqrtf(sum_x2/32);
- for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
- scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
- float scale = scales[j];
- if (scale > max_scale) {
- max_scale = scale;
- }
- float min = mins[j];
- if (min > max_min) {
- max_min = min;
- }
- }
- #if QK_K == 256
- float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
- float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
- for (int j = 0; j < QK_K/32; ++j) {
- uint8_t ls = nearest_int(inv_scale*scales[j]);
- uint8_t lm = nearest_int(inv_min*mins[j]);
- ls = MIN(63, ls);
- lm = MIN(63, lm);
- if (j < 4) {
- y[i].scales[j] = ls;
- y[i].scales[j+4] = lm;
- } else {
- y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
- y[i].scales[j-4] |= ((ls >> 4) << 6);
- y[i].scales[j-0] |= ((lm >> 4) << 6);
- }
- }
- y[i].d = ggml_fp32_to_fp16(max_scale/63.f);
- y[i].dmin = ggml_fp32_to_fp16(max_min/63.f);
- uint8_t sc, m;
- for (int j = 0; j < QK_K/32; ++j) {
- get_scale_min_k4(j, y[i].scales, &sc, &m);
- const float d = ggml_fp16_to_fp32(y[i].d) * sc;
- if (!d) continue;
- const float dm = ggml_fp16_to_fp32(y[i].dmin) * m;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + dm)/d);
- l = MAX(0, MIN(15, l));
- L[32*j + ii] = l;
- }
- }
- #else
- const float s_factor = 15.f;
- float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
- float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
- int d1 = nearest_int(inv_scale*scales[0]);
- int m1 = nearest_int(inv_min*mins[0]);
- int d2 = nearest_int(inv_scale*scales[1]);
- int m2 = nearest_int(inv_min*mins[1]);
- y[i].scales[0] = d1 | (m1 << 4);
- y[i].scales[1] = d2 | (m2 << 4);
- y[i].d[0] = ggml_fp32_to_fp16(max_scale/s_factor);
- y[i].d[1] = ggml_fp32_to_fp16(max_min/s_factor);
- float sumlx = 0;
- int suml2 = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- const uint8_t sd = y[i].scales[j] & 0xF;
- const uint8_t sm = y[i].scales[j] >> 4;
- const float d = ggml_fp16_to_fp32(y[i].d[0]) * sd;
- if (!d) continue;
- const float m = ggml_fp16_to_fp32(y[i].d[1]) * sm;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + m)/d);
- l = MAX(0, MIN(15, l));
- L[32*j + ii] = l;
- sumlx += (x[32*j + ii] + m)*l*sd;
- suml2 += l*l*sd*sd;
- }
- }
- if (suml2) {
- y[i].d[0] = ggml_fp32_to_fp16(sumlx/suml2);
- }
- #endif
- uint8_t * q = y[i].qs;
- for (int j = 0; j < QK_K; j += 64) {
- for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
- q += 32;
- }
- x += QK_K;
- }
- }
- void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const uint8_t * q = x[i].qs;
- #if QK_K == 256
- const float d = ggml_fp16_to_fp32(x[i].d);
- const float min = ggml_fp16_to_fp32(x[i].dmin);
- int is = 0;
- uint8_t sc, m;
- for (int j = 0; j < QK_K; j += 64) {
- get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
- const float d1 = d * sc; const float m1 = min * m;
- get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
- const float d2 = d * sc; const float m2 = min * m;
- for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
- for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
- q += 32; is += 2;
- }
- #else
- const float dall = ggml_fp16_to_fp32(x[i].d[0]);
- const float mall = ggml_fp16_to_fp32(x[i].d[1]);
- const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
- const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
- for (int l = 0; l < 32; ++l) {
- y[l+ 0] = d1 * (q[l] & 0xF) - m1;
- y[l+32] = d2 * (q[l] >> 4) - m2;
- }
- y += QK_K;
- #endif
- }
- }
- void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_q4_K * restrict y = vy;
- quantize_row_q4_K_reference(x, y, k);
- }
- size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- (void)hist; // TODO: collect histograms
- for (int j = 0; j < nb; j += k) {
- block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
- quantize_row_q4_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q4_K));
- }
- // ====================== 5-bit (de)-quantization
- void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- #if QK_K == 256
- uint8_t L[QK_K];
- float mins[QK_K/32];
- float scales[QK_K/32];
- float weights[32];
- uint8_t Laux[32];
- #else
- int8_t L[QK_K];
- float scales[QK_K/16];
- #endif
- for (int i = 0; i < nb; i++) {
- #if QK_K == 256
- float max_scale = 0; // as we are deducting the min, scales are always positive
- float max_min = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
- float sum_x2 = 0;
- for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
- float av_x = sqrtf(sum_x2/32);
- for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
- scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
- float scale = scales[j];
- if (scale > max_scale) {
- max_scale = scale;
- }
- float min = mins[j];
- if (min > max_min) {
- max_min = min;
- }
- }
- float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
- float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
- for (int j = 0; j < QK_K/32; ++j) {
- uint8_t ls = nearest_int(inv_scale*scales[j]);
- uint8_t lm = nearest_int(inv_min*mins[j]);
- ls = MIN(63, ls);
- lm = MIN(63, lm);
- if (j < 4) {
- y[i].scales[j] = ls;
- y[i].scales[j+4] = lm;
- } else {
- y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
- y[i].scales[j-4] |= ((ls >> 4) << 6);
- y[i].scales[j-0] |= ((lm >> 4) << 6);
- }
- }
- y[i].d = ggml_fp32_to_fp16(max_scale/63.f);
- y[i].dmin = ggml_fp32_to_fp16(max_min/63.f);
- uint8_t sc, m;
- for (int j = 0; j < QK_K/32; ++j) {
- get_scale_min_k4(j, y[i].scales, &sc, &m);
- const float d = ggml_fp16_to_fp32(y[i].d) * sc;
- if (!d) continue;
- const float dm = ggml_fp16_to_fp32(y[i].dmin) * m;
- for (int ii = 0; ii < 32; ++ii) {
- int l = nearest_int((x[32*j + ii] + dm)/d);
- l = MAX(0, MIN(31, l));
- L[32*j + ii] = l;
- }
- }
- uint8_t * restrict qh = y[i].qh;
- uint8_t * restrict ql = y[i].qs;
- memset(qh, 0, QK_K/8);
- uint8_t m1 = 1, m2 = 2;
- for (int n = 0; n < QK_K; n += 64) {
- for (int j = 0; j < 32; ++j) {
- int l1 = L[n + j];
- if (l1 > 15) {
- l1 -= 16; qh[j] |= m1;
- }
- int l2 = L[n + j + 32];
- if (l2 > 15) {
- l2 -= 16; qh[j] |= m2;
- }
- ql[j] = l1 | (l2 << 4);
- }
- m1 <<= 2; m2 <<= 2;
- ql += 32;
- }
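- // i.e. qh[j] collects the 5th bits: bits 0/1 for quants j and j+32 of
- // the first 64 values, bits 2/3 for the next 64, and so on, since m1/m2
- // shift left by two every 64-quant step.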
- #else
- float max_scale = 0, amax = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1);
- float abs_scale = fabsf(scales[j]);
- if (abs_scale > amax) {
- amax = abs_scale;
- max_scale = scales[j];
- }
- }
- float iscale = -128.f/max_scale;
- for (int j = 0; j < QK_K/16; ++j) {
- int l = nearest_int(iscale*scales[j]);
- y[i].scales[j] = MAX(-128, MIN(127, l));
- }
- y[i].d = ggml_fp32_to_fp16(1/iscale);
- for (int j = 0; j < QK_K/16; ++j) {
- const float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j];
- if (!d) continue;
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-16, MIN(15, l));
- L[16*j + ii] = l + 16;
- }
- }
- uint8_t * restrict qh = y[i].qh;
- uint8_t * restrict ql = y[i].qs;
- memset(qh, 0, QK_K/8);
- for (int j = 0; j < 32; ++j) {
- int jm = j%8;
- int is = j/8;
- int l1 = L[j];
- if (l1 > 15) {
- l1 -= 16; qh[jm] |= (1 << is);
- }
- int l2 = L[j + 32];
- if (l2 > 15) {
- l2 -= 16; qh[jm] |= (1 << (4 + is));
- }
- ql[j] = l1 | (l2 << 4);
- }
- #endif
- x += QK_K;
- }
- }
- void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const uint8_t * ql = x[i].qs;
- const uint8_t * qh = x[i].qh;
- #if QK_K == 256
- const float d = ggml_fp16_to_fp32(x[i].d);
- const float min = ggml_fp16_to_fp32(x[i].dmin);
- int is = 0;
- uint8_t sc, m;
- uint8_t u1 = 1, u2 = 2;
- for (int j = 0; j < QK_K; j += 64) {
- get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
- const float d1 = d * sc; const float m1 = min * m;
- get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
- const float d2 = d * sc; const float m2 = min * m;
- for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
- for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
- ql += 32; is += 2;
- u1 <<= 2; u2 <<= 2;
- }
- #else
- float d = ggml_fp16_to_fp32(x[i].d);
- const int8_t * restrict s = x[i].scales;
- for (int l = 0; l < 8; ++l) {
- y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
- y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
- y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
- y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
- y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
- y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
- y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
- y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
- }
- y += QK_K;
- #endif
- }
- }
- void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_q5_K * restrict y = vy;
- quantize_row_q5_K_reference(x, y, k);
- }
- size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- (void)hist;
- for (int j = 0; j < nb; j += k) {
- block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
- quantize_row_q5_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q5_K));
- }
- // ====================== 6-bit (de)-quantization
- void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- int8_t L[QK_K];
- float scales[QK_K/16];
- for (int i = 0; i < nb; i++) {
- float max_scale = 0;
- float max_abs_scale = 0;
- for (int ib = 0; ib < QK_K/16; ++ib) {
- const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1);
- scales[ib] = scale;
- const float abs_scale = fabsf(scale);
- if (abs_scale > max_abs_scale) {
- max_abs_scale = abs_scale;
- max_scale = scale;
- }
- }
- float iscale = -128.f/max_scale;
- y[i].d = ggml_fp32_to_fp16(1/iscale);
- for (int ib = 0; ib < QK_K/16; ++ib) {
- y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
- }
- for (int j = 0; j < QK_K/16; ++j) {
- float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j];
- if (!d) {
- continue;
- }
- for (int ii = 0; ii < 16; ++ii) {
- int l = nearest_int(x[16*j + ii]/d);
- l = MAX(-32, MIN(31, l));
- L[16*j + ii] = l + 32;
- }
- }
- uint8_t * restrict ql = y[i].ql;
- uint8_t * restrict qh = y[i].qh;
- #if QK_K == 256
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- const uint8_t q1 = L[j + l + 0] & 0xF;
- const uint8_t q2 = L[j + l + 32] & 0xF;
- const uint8_t q3 = L[j + l + 64] & 0xF;
- const uint8_t q4 = L[j + l + 96] & 0xF;
- ql[l+ 0] = q1 | (q3 << 4);
- ql[l+32] = q2 | (q4 << 4);
- qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
- }
- ql += 64;
- qh += 32;
- }
- #else
- for (int l = 0; l < 32; ++l) {
- const uint8_t q1 = L[l + 0] & 0xF;
- const uint8_t q2 = L[l + 32] & 0xF;
- ql[l] = q1 | (q2 << 4);
- }
- for (int l = 0; l < 16; ++l) {
- qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
- }
- #endif
- x += QK_K;
- }
- }
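- // Layout note (QK_K == 256): each 6-bit quant splits into a low nibble
- // in ql and a 2-bit high part in qh. Within every 128-value half,
- // ql[l] holds quants l and l+64, ql[l+32] holds quants l+32 and l+96,
- // and qh[l] stacks the four high parts at bit offsets 0, 2, 4 and 6.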
- void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- const float d = ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict ql = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict sc = x[i].scales;
- #if QK_K == 256
- for (int n = 0; n < QK_K; n += 128) {
- for (int l = 0; l < 32; ++l) {
- int is = l/16;
- const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- y[l + 0] = d * sc[is + 0] * q1;
- y[l + 32] = d * sc[is + 2] * q2;
- y[l + 64] = d * sc[is + 4] * q3;
- y[l + 96] = d * sc[is + 6] * q4;
- }
- y += 128;
- ql += 64;
- qh += 32;
- sc += 8;
- }
- #else
- for (int l = 0; l < 16; ++l) {
- const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- y[l+ 0] = d * sc[0] * q1;
- y[l+16] = d * sc[1] * q2;
- y[l+32] = d * sc[2] * q3;
- y[l+48] = d * sc[3] * q4;
- }
- y += 64;
- #endif
- }
- }
- void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK_K == 0);
- block_q6_K * restrict y = vy;
- quantize_row_q6_K_reference(x, y, k);
- }
- size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- (void)hist; // TODO
- for (int j = 0; j < nb; j += k) {
- block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
- quantize_row_q6_K_reference(src + j, y, k);
- }
- return (n/QK_K*sizeof(block_q6_K));
- }
- //===================================== Q8_K ==============================================
- void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- float max = 0;
- float amax = 0;
- for (int j = 0; j < QK_K; ++j) {
- float ax = fabsf(x[j]);
- if (ax > amax) {
- amax = ax; max = x[j];
- }
- }
- if (!amax) {
- y[i].d = 0;
- memset(y[i].qs, 0, QK_K);
- x += QK_K;
- continue;
- }
- const float iscale = -128.f/max;
- for (int j = 0; j < QK_K; ++j) {
- int v = nearest_int(iscale*x[j]);
- y[i].qs[j] = MIN(127, v);
- }
- for (int j = 0; j < QK_K/16; ++j) {
- int sum = 0;
- for (int ii = 0; ii < 16; ++ii) {
- sum += y[i].qs[j*16 + ii];
- }
- y[i].bsums[j] = sum;
- }
- y[i].d = 1/iscale;
- x += QK_K;
- }
- }
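- // The per-16 sums cached in bsums above let the k-quant dot products
- // below fold the sub-block minimums into a single scales-times-bsums
- // pass instead of revisiting every 8-bit quant (see e.g. the q8sums /
- // mins handling in ggml_vec_dot_q2_K_q8_K).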
- void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
- for (int i = 0; i < nb; i++) {
- for (int j = 0; j < QK_K; ++j) {
- *y++ = x[i].d * x[i].qs[j];
- }
- }
- }
- void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
- quantize_row_q8_K_reference(x, y, k);
- }
- //===================================== Dot products =================================
- //
- // Helper functions
- //
- #if __AVX__ || __AVX2__ || __AVX512F__
- // horizontally add 8 floats
- static inline float hsum_float_8(const __m256 x) {
- __m128 res = _mm256_extractf128_ps(x, 1);
- res = _mm_add_ps(res, _mm256_castps256_ps128(x));
- res = _mm_add_ps(res, _mm_movehl_ps(res, res));
- res = _mm_add_ss(res, _mm_movehdup_ps(res));
- return _mm_cvtss_f32(res);
- }
- // shuffles to pick the required scales in dot products
- static inline __m256i get_scale_shuffle_q3k(int i) {
- static const uint8_t k_shuffle[128] = {
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
- 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
- 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
- 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
- };
- return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
- }
- static inline __m256i get_scale_shuffle_k4(int i) {
- static const uint8_t k_shuffle[256] = {
- 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
- 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
- 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
- 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
- 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
- 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
- 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
- 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
- };
- return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
- }
- static inline __m128i get_scale_shuffle(int i) {
- static const uint8_t k_shuffle[128] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
- 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
- 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
- 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
- };
- return _mm_loadu_si128((const __m128i*)k_shuffle + i);
- }
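- // These tables feed _mm256_shuffle_epi8 / _mm_shuffle_epi8 so that each
- // 16-bit sub-block scale is broadcast across a whole lane, lining the
- // scales up with the 16-bit products produced by the maddubs steps in
- // the dot products below.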
- #endif
- #if QK_K == 256
- void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const block_q2_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m3 = vdupq_n_u8(0x3);
- const uint8x16_t m4 = vdupq_n_u8(0xF);
- #if defined(__ARM_FEATURE_DOTPROD)
- const int32x4_t vzero = vdupq_n_s32(0);
- #endif
- int8x16x2_t q2bytes;
- uint8_t aux[16];
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint8_t * restrict sc = x[i].scales;
- const uint8x16_t mins_and_scales = vld1q_u8(sc);
- const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
- vst1q_u8(aux, scales);
- const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
- const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums);
- const int16x8x2_t mins16 = {vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))};
- const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
- vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
- const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
- vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
- sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
- int isum = 0;
- int is = 0;
- // We use this macro instead of a function call because for some reason
- // the code runs 2-3% slower, even if the function is declared inline
- #if defined(__ARM_FEATURE_DOTPROD)
- #define MULTIPLY_ACCUM_WITH_SCALE(index)\
- isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
- isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
- #else
- #define MULTIPLY_ACCUM_WITH_SCALE(index)\
- {\
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),\
- vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));\
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),\
- vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));\
- isum += vaddvq_s16(p1) * aux[is+(index)] + vaddvq_s16(p2) * aux[is+1+(index)];\
- }
- #endif
- #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
- q8bytes = vld1q_s8_x2(q8); q8 += 32;\
- q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
- q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
- MULTIPLY_ACCUM_WITH_SCALE((index));
- for (int j = 0; j < QK_K/128; ++j) {
- const uint8x16x2_t q2bits = vld1q_u8_x2(q2); q2 += 32;
- int8x16x2_t q8bytes = vld1q_s8_x2(q8); q8 += 32;
- q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
- q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
- MULTIPLY_ACCUM_WITH_SCALE(0);
- SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
- SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
- SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
- is += 8;
- }
- sum += d * isum;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- const __m128i m4 = _mm_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
- const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
- const __m256i mins = _mm256_cvtepi8_epi16(mins8);
- const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
- const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
- const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
- const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
- const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
- __m256i sumi = _mm256_setzero_si256();
- for (int j = 0; j < QK_K/128; ++j) {
- const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
- const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
- const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
- const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
- __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
- __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
- __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
- __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
- p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
- p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
- p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
- p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
- p0 = _mm256_add_epi32(p0, p1);
- p2 = _mm256_add_epi32(p2, p3);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
- }
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
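- // AVX (without AVX2): the same algorithm as above, but with 128-bit integer ops since
- // AVX1 has no 256-bit integer instructions; two 128-bit accumulators are merged at the end.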
- const __m128i m3 = _mm_set1_epi8(0x3);
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m2 = _mm_set1_epi8(0x2);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- // load mins and scales from block_q2_K.scales[QK_K/16]
- const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
- const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
- const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
- const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
- // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
- const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
- const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
- // sumf += -dmin * summs in 32bits*8
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
- const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
- const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
- const __m128i scales[2] = { scales_0, scales_1 };
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- for (int j = 0; j < QK_K/128; ++j) {
- // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
- const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
- __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
- const __m128i q2_0 = _mm_and_si128(q2bits, m3);
- const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
- const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
- const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
- q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
- const __m128i q2_1 = _mm_and_si128(q2bits, m3);
- const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
- const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
- const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
- // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
- __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
- __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
- __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
- __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
- __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
- __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
- __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
- __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
- // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
- __m128i shuffle = _mm_set1_epi16(0x0100);
- p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
- shuffle = _mm_add_epi16(shuffle, m2);
- p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
- shuffle = _mm_add_epi16(shuffle, m2);
- p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
- shuffle = _mm_add_epi16(shuffle, m2);
- p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
- shuffle = _mm_add_epi16(shuffle, m2);
- p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
- shuffle = _mm_add_epi16(shuffle, m2);
- p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
- shuffle = _mm_add_epi16(shuffle, m2);
- p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
- shuffle = _mm_add_epi16(shuffle, m2);
- p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
- p0 = _mm_add_epi32(p0, p1);
- p2 = _mm_add_epi32(p2, p3);
- p4 = _mm_add_epi32(p4, p5);
- p6 = _mm_add_epi32(p6, p7);
- // isum in 32bits*4*2
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
- }
- // sumf += dall * isum in 32bits (the -dmin * summs part was already accumulated above)
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc);
- #else
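- // Scalar reference for one super-block (QK_K = 256, 16 groups of 16 quants):
- //     sumf += dall * sum_j (sc[j] & 0xF) * sum_{l in group j} q8[l] * ((q2[l] >> shift_j) & 3)
- //           - dmin * sum_j (sc[j] >> 4) * bsums[j]
- // with dall = y.d * fp16(x.d) and dmin = y.d * fp16(x.dmin).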
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
- const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
- #endif
- }
- #else
- void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const block_q2_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m3 = vdupq_n_u8(0x3);
- #if defined(__ARM_FEATURE_DOTPROD)
- const int32x4_t vzero = vdupq_n_s32(0);
- #endif
- int8x16x4_t q2bytes;
- uint32_t aux32[2];
- const uint8_t * scales = (const uint8_t *)aux32;
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * (float)x[i].d;
- const float dmin = -y[i].d * (float)x[i].dmin;
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- aux32[0] = sc[0] & 0x0f0f0f0f;
- aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
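- // For QK_K == 64 the four 4-bit scales and four 4-bit mins fit in a single 32-bit word:
- // after the nibble split, scales[0..3] are the scales and scales[4..7] the mins, the
- // latter consumed with the four 16-wide bsums just below.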
- sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
- int isum1 = 0, isum2 = 0;
- const uint8x16_t q2bits = vld1q_u8(q2);
- const int8x16x4_t q8bytes = vld1q_s8_x4(q8);
- q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
- q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
- q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
- q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
- #if defined(__ARM_FEATURE_DOTPROD)
- isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
- isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
- isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
- isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
- #else
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- isum1 += vaddvq_s16(p1) * scales[0];
- isum2 += vaddvq_s16(p2) * scales[1];
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q2bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- const int16x8_t p4 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q2bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- isum1 += vaddvq_s16(p3) * scales[2];
- isum2 += vaddvq_s16(p4) * scales[3];
- #endif
- sum += d * (isum1 + isum2);
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- __m256 acc = _mm256_setzero_ps();
- uint32_t ud, um;
- const uint8_t * restrict db = (const uint8_t *)&ud;
- const uint8_t * restrict mb = (const uint8_t *)&um;
- float summs = 0;
- // TODO: optimize this
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- ud = (sc[0] >> 0) & 0x0f0f0f0f;
- um = (sc[0] >> 4) & 0x0f0f0f0f;
- int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
- summs += dmin * smin;
- const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
- const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
- const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
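- // The 16 q2 bytes cover all 64 quants: q2_0 packs planes 0 and 1 into the low/high
- // 128-bit lanes and q2_1 packs planes 2 and 3, matching the ordering of the two
- // 32-byte q8 loads that follow.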
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
- const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
- const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
- const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
- const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
- const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(3);
- __m256 acc = _mm256_setzero_ps();
- uint32_t ud, um;
- const uint8_t * restrict db = (const uint8_t *)&ud;
- const uint8_t * restrict mb = (const uint8_t *)&um;
- float summs = 0;
- // TODO: optimize this
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q2 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
- ud = (sc[0] >> 0) & 0x0f0f0f0f;
- um = (sc[0] >> 4) & 0x0f0f0f0f;
- int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
- summs += dmin * smin;
- const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
- const __m128i q2_0 = _mm_and_si128(q2bits, m3);
- const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
- const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
- const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
- const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
- const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
- const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
- const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
- const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
- const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
- const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #else
- float sumf = 0;
- int isum[4];
- for (int i = 0; i < nb; ++i) {
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
- int summs = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
- const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- isum[0] = isum[1] = isum[2] = isum[3] = 0;
- for (int l = 0; l < 16; ++l) {
- isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
- isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
- isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
- isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
- }
- for (int l = 0; l < 4; ++l) {
- isum[l] *= (sc[l] & 0xF);
- }
- sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
- }
- *s = sumf;
- #endif
- }
- #endif
- #if QK_K == 256
- void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const uint32_t kmask1 = 0x03030303;
- const uint32_t kmask2 = 0x0f0f0f0f;
- const block_q3_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- uint32_t aux[3];
- uint32_t utmp[4];
- const uint8x16_t m3b = vdupq_n_u8(0x3);
- #ifdef __ARM_FEATURE_DOTPROD
- const int32x4_t vzero = vdupq_n_s32(0);
- #endif
- const uint8x16_t m0 = vdupq_n_u8(1);
- const uint8x16_t m1 = vshlq_n_u8(m0, 1);
- const uint8x16_t m2 = vshlq_n_u8(m0, 2);
- const uint8x16_t m3 = vshlq_n_u8(m0, 3);
- const int8_t m32 = 32;
- int8x16x4_t q3bytes;
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict qh = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- uint8x16x2_t qhbits = vld1q_u8_x2(qh);
- uint8x16x4_t q3h;
- int32_t isum = 0;
- // Set up scales
- memcpy(aux, x[i].scales, 12);
- utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
- utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
- utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
- utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
- int8_t * scale = (int8_t *)utmp;
- for (int j = 0; j < 16; ++j) scale[j] -= m32;
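- // q3_K stores 16 6-bit scales in 12 bytes: the low 4 bits of each scale in aux[0..1],
- // the high 2 bits in aux[2]. The kmask expressions above stitch them back together and
- // subtracting 32 maps the unsigned 6-bit values to the signed range [-32, 31].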
- for (int j = 0; j < QK_K/128; ++j) {
- const uint8x16x2_t q3bits = vld1q_u8_x2(q3); q3 += 32;
- const int8x16x4_t q8bytes_1 = vld1q_s8_x4(q8); q8 += 64;
- const int8x16x4_t q8bytes_2 = vld1q_s8_x4(q8); q8 += 64;
- q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
- q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
- q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
- q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
- q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
- q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
- q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
- q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
- #else
- int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_1.val[0])),
- vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_1.val[0])));
- int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_1.val[1])),
- vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_1.val[1])));
- int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_1.val[2])),
- vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_1.val[2])));
- int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_1.val[3])),
- vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_1.val[3])));
- isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
- #endif
- scale += 4;
- q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
- q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
- q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
- q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
- q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
- q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
- q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
- q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
- #else
- p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_2.val[0])),
- vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_2.val[0])));
- p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_2.val[1])),
- vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_2.val[1])));
- p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_2.val[2])),
- vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_2.val[2])));
- p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_2.val[3])),
- vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_2.val[3])));
- isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
- #endif
- scale += 4;
- if (j == 0) {
- qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
- qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
- }
- }
- sum += d * isum;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- const __m256i mone = _mm256_set1_epi8(1);
- const __m128i m32 = _mm_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- uint32_t aux[3];
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- // Set up scales
- memcpy(aux, x[i].scales, 12);
- __m128i scales128 = _mm_set_epi32(
- ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
- ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
- (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
- (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
- scales128 = _mm_sub_epi8(scales128, m32);
- const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
- const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
- const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
- const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
- // high bit
- const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
- // integer accumulator
- __m256i sumi = _mm256_setzero_si256();
- int bit = 0;
- int is = 0;
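- // hbits holds all 256 high bits; `bit` selects one plane per inner step, and the
- // andnot/shift pair turns "high bit clear" into the value 4 that is later subtracted
- // from the 2-bit low quants to apply the -4 offset.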
- for (int j = 0; j < QK_K/128; ++j) {
- // load low 2 bits
- const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
- // prepare low and high bits
- const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
- const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
- const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
- const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
- const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
- ++bit;
- // load Q8 quants
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- // Dot product: we multiply the 2 low bits and the high bit part separately, so we can use _mm256_maddubs_epi16,
- // and then subtract. The high bit part carries the -4 offset: it is 4 if the high bit was not set and 0 if it was
- // set, so the subtraction maps the quants to the signed range [-4, 3]
- __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
- __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
- __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
- __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
- __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
- __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
- __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
- // multiply with scales
- p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
- p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
- p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
- p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
- // accumulate
- p16_0 = _mm256_add_epi32(p16_0, p16_1);
- p16_2 = _mm256_add_epi32(p16_2, p16_3);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
- }
- // multiply with block scale and accumulate
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(3);
- const __m128i mone = _mm_set1_epi8(1);
- const __m128i m32 = _mm_set1_epi8(32);
- const __m128i m2 = _mm_set1_epi8(2);
- __m256 acc = _mm256_setzero_ps();
- const uint32_t *aux;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- // Set up scales
- aux = (const uint32_t *)x[i].scales;
- __m128i scales128 = _mm_set_epi32(
- ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
- ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
- (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
- (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
- scales128 = _mm_sub_epi8(scales128, m32);
- const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
- const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
- const __m128i scales[2] = { scales_0, scales_1 };
- // high bit *128*2 from block_q3_K.hmask[QK_K/8]
- const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
- const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
- // integer accumulator
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- for (int j = 0; j < QK_K/128; ++j) {
- // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
- const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
- const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
- // prepare low and high bits
- const int bit = j << 2;
- const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
- const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
- const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
- const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
- const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
- const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
- const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
- const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
- const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
- const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
- const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
- const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
- const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
- const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
- const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
- const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
- // load Q8 quants from block_q8_K.qs[QK_K]
- const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- // Dot product: we multiply the 2 low bits and the high bit part separately, so we can use _mm_maddubs_epi16,
- // and then subtract. The high bit part carries the -4 offset: it is 4 if the high bit was not set and 0 if it was
- // set, so the subtraction maps the quants to the signed range [-4, 3]
- __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
- __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
- __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
- __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
- __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
- __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
- __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
- __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
- __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
- __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
- __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
- __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
- __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
- __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
- __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
- __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- p16_4 = _mm_sub_epi16(p16_4, q8s_4);
- p16_5 = _mm_sub_epi16(p16_5, q8s_5);
- p16_6 = _mm_sub_epi16(p16_6, q8s_6);
- p16_7 = _mm_sub_epi16(p16_7, q8s_7);
- // multiply with scales
- __m128i shuffle = _mm_set1_epi16(0x0100);
- p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
- shuffle = _mm_add_epi16(shuffle, m2);
- p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
- // accumulate
- p16_0 = _mm_add_epi32(p16_0, p16_1);
- p16_2 = _mm_add_epi32(p16_2, p16_3);
- p16_4 = _mm_add_epi32(p16_4, p16_5);
- p16_6 = _mm_add_epi32(p16_6, p16_7);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
- }
- // multiply with block scale and accumulate
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc);
- #else
- // scalar version
- // This function is written this way so that the compiler can vectorize most of it.
- // With -Ofast, GCC and clang produce code that is within a factor of 2 or so of the
- // manually vectorized versions above; every other formulation I tried ran at least 4 times slower.
- // Ideally we would write the code once and the compiler would automatically produce the best
- // possible machine instructions, instead of us having to hand-write vectorized versions for
- // AVX, ARM_NEON, etc.
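- // What makes this autovectorizable: fixed-trip-count inner loops over small contiguous
- // int8/int16/int32 scratch arrays (aux8/aux16/aux32), with the float scaling hoisted out
- // to a single multiply per 8 lanes at the end of each super-block.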
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
- void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q3_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- #ifdef __ARM_FEATURE_DOTPROD
- const int32x4_t vzero = vdupq_n_s32(0);
- #endif
- const uint8x16_t m3b = vdupq_n_u8(0x3);
- const uint8x16_t mh = vdupq_n_u8(4);
- int8x16x4_t q3bytes;
- uint16_t aux16[2];
- int8_t * scales = (int8_t *)aux16;
- float sum = 0;
- for (int i = 0; i < nb; ++i) {
- uint8x16x4_t q3h;
- const uint8x8_t hbits = vld1_u8(x[i].hmask);
- const uint8x16_t q3bits = vld1q_u8(x[i].qs);
- const int8x16x4_t q8bytes = vld1q_s8_x4(y[i].qs);
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- for (int j = 0; j < 4; ++j) scales[j] -= 8;
- int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
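- // The -4 offset of the 3-bit quants is factored out: since
- //     sum_l (q[l] - 4) * q8[l] = dot(q, q8) - 4 * bsum,
- // isum starts at the precomputed -4 * sum(scale * bsum) correction and the products
- // below can use the unsigned reconstructed quants directly.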
- const float d = y[i].d * (float)x[i].d;
- const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
- q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
- q3h.val[1] = vandq_u8(mh, htmp);
- q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
- q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
- q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
- q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
- q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
- q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
- isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
- #else
- const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- isum += vaddvq_s16(p0) * scales[0] + vaddvq_s16(p1) * scales[2] + vaddvq_s16(p2) * scales[1] + vaddvq_s16(p3) * scales[3];
- #endif
- sum += d * isum;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m3 = _mm256_set1_epi8(3);
- const __m256i m1 = _mm256_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- uint64_t aux64;
- uint16_t aux16[2];
- const int8_t * aux8 = (const int8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
- const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
- memcpy(&aux64, x[i].hmask, 8);
- const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
- __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
- __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
- q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
- q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
- // load low 2 bits
- const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
- // prepare low and high bits
- const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
- const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
- const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
- // load Q8 quants
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- // Dot product: we multiply the 2 low bits and the high bit part separately, so we can use _mm256_maddubs_epi16,
- // and then subtract. The high bit part carries the -4 offset: it is 4 if the high bit was not set and 0 if it was
- // set, so the subtraction maps the quants to the signed range [-4, 3]
- const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
- const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
- __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- // multiply with scales
- p16_0 = _mm256_madd_epi16(scale_0, p16_0);
- p16_1 = _mm256_madd_epi16(scale_1, p16_1);
- p16_0 = _mm256_add_epi32(p16_0, p16_1);
- // multiply with block scale and accumulate
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m3 = _mm_set1_epi8(3);
- const __m128i m1 = _mm_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- uint64_t aux64;
- uint16_t aux16[2];
- const int8_t * aux8 = (const int8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t a = *(const uint16_t *)x[i].scales;
- aux16[0] = a & 0x0f0f;
- aux16[1] = (a >> 4) & 0x0f0f;
- const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
- const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
- const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
- const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
- memcpy(&aux64, x[i].hmask, 8);
- __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
- __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
- __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
- __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
- q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
- q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
- q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
- q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
- // load low 2 bits
- const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
- // prepare low and high bits
- const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
- const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
- const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
- const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
- // load Q8 quants
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- // Dot product: we multiply the 2 low bits and the high bit part separately, so we can use _mm_maddubs_epi16,
- // and then subtract. The high bit part carries the -4 offset: it is 4 if the high bit was not set and 0 if it was
- // set, so the subtraction maps the quants to the signed range [-4, 3]
- const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
- const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
- const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
- const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
- __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
- __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
- __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
- __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- // multiply with scales
- p16_0 = _mm_madd_epi16(scale_0, p16_0);
- p16_1 = _mm_madd_epi16(scale_1, p16_1);
- p16_2 = _mm_madd_epi16(scale_2, p16_2);
- p16_3 = _mm_madd_epi16(scale_3, p16_3);
- p16_0 = _mm_add_epi32(p16_0, p16_2);
- p16_1 = _mm_add_epi32(p16_1, p16_3);
- __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
- // multiply with block scale and accumulate
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
- }
- *s = hsum_float_8(acc);
- #else
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- int32_t scales[4];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q3 = x[i].qs;
- const uint8_t * restrict hm = x[i].hmask;
- const int8_t * restrict q8 = y[i].qs;
- int8_t * restrict a = aux8;
- for (int l = 0; l < 8; ++l) {
- a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
- a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
- a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
- a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
- a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
- a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
- a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
- a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
- }
- scales[0] = (x[i].scales[0] & 0xF) - 8;
- scales[1] = (x[i].scales[0] >> 4) - 8;
- scales[2] = (x[i].scales[1] & 0xF) - 8;
- scales[3] = (x[i].scales[1] >> 4) - 8;
- memset(aux32, 0, 8*sizeof(int32_t));
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
- }
- const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
- #if QK_K == 256
- void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q4_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- static const uint32_t kmask1 = 0x3f3f3f3f;
- static const uint32_t kmask2 = 0x0f0f0f0f;
- static const uint32_t kmask3 = 0x03030303;
- uint32_t utmp[4];
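- // q4_K packs 8 6-bit scales and 8 6-bit mins into the 12-byte x[i].scales field; the
- // kmask constants split out the 6-bit fields (low 4 bits plus 2 high bits) so that,
- // after unpacking, utmp[0..1] hold the scales and utmp[2..3] the mins.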
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- #ifdef __ARM_FEATURE_DOTPROD
- const int32x4_t mzero = vdupq_n_s32(0);
- #endif
- int8x16x2_t q4bytes;
- int8x16x2_t q8bytes;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
- memcpy(utmp, x[i].scales, 12);
- const uint32x2_t mins8 = {utmp[1] & kmask1, ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4)};
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[0] &= kmask1;
- const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
- const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
- vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
- sumf -= dmin * vaddvq_s32(prod);
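- // The mins are applied in closed form: every quant in a 32-quant group shares one min,
- // so sum(min * q8) = min * bsum(group); vpaddq above pairs the 16 bsums into 8 group
- // sums and the two vmull_s16 apply all 8 mins at once.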
- const uint8_t * scales = (const uint8_t *)utmp;
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- int32_t sumi1 = 0;
- int32_t sumi2 = 0;
- for (int j = 0; j < QK_K/64; ++j) {
- const uint8x16x2_t q4bits = vld1q_u8_x2(q4); q4 += 32;
- #ifdef __ARM_FEATURE_DOTPROD
- q8bytes = vld1q_s8_x2(q8); q8 += 32;
- q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
- q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
- const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
- sumi1 += vaddvq_s32(p1) * scales[2*j+0];
- q8bytes = vld1q_s8_x2(q8); q8 += 32;
- q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
- q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
- const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
- sumi2 += vaddvq_s32(p2) * scales[2*j+1];
- #else
- q8bytes = vld1q_s8_x2(q8); q8 += 32;
- q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
- q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
- const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2*j+0];
- q8bytes = vld1q_s8_x2(q8); q8 += 32;
- q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
- q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) * scales[2*j+1];
- #endif
- }
- sumf += d * (sumi1 + sumi2);
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- __m128 acc_m = _mm_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
- const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
- const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
- acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
- const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
- const __m256i scales = MM256_SET_M128I(sc128, sc128);
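- // mins_and_scales widens the 16 unpacked bytes to 16-bit lanes: the low 128-bit half
- // holds the 8 scales (duplicated into both lanes of `scales` for the per-group shuffles),
- // the high half holds the 8 mins consumed by the madd above.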
- __m256i sumi = _mm256_setzero_si256();
- for (int j = 0; j < QK_K/64; ++j) {
- const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
- const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
- const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
- const __m256i q4l = _mm256_and_si256(q4bits, m4);
- const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
- const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
- p16l = _mm256_madd_epi16(scale_l, p16l);
- const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
- p16h = _mm256_madd_epi16(scale_h, p16h);
- const __m256i sumj = _mm256_add_epi32(p16l, p16h);
- sumi = _mm256_add_epi32(sumi, sumj);
- }
- __m256 vd = _mm256_set1_ps(d);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
- }
- acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
- acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
- *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m2 = _mm_set1_epi8(0x2);
- __m256 acc = _mm256_setzero_ps();
- __m128 acc_m = _mm_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
- const __m128i scales = _mm_cvtepu8_epi16(utmps);
- const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
- const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
- const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
- const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
- const __m128i prod = _mm_madd_epi16(mins, q8s);
- acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- __m128i shuffle = _mm_set1_epi16(0x0100);
- for (int j = 0; j < QK_K/64; ++j) {
- const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
- const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
- q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
- const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
- const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
- p16l = _mm_madd_epi16(scale_l, p16l);
- sumi_0 = _mm_add_epi32(sumi_0, p16l);
- const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
- p16l = _mm_madd_epi16(scale_l, p16l);
- sumi_1 = _mm_add_epi32(sumi_1, p16l);
- const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
- p16h = _mm_madd_epi16(scale_h, p16h);
- sumi_0 = _mm_add_epi32(sumi_0, p16h);
- const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
- p16h = _mm_madd_epi16(scale_h, p16h);
- sumi_1 = _mm_add_epi32(sumi_1, p16h);
- }
- __m256 vd = _mm256_set1_ps(d);
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
- }
- acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
- acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
- *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
- #else
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
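- // Scalar reference for one super-block (8 groups of 32 quants):
- //     sumf += d * sum_j scales[j] * sum_{l in group j} q8[l] * q4[l]
- //           - dmin * sum_j bsums[j] * mins[j/2]
- // where the bsums are 16-wide, so two of them share each 32-quant min.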
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = ggml_fp16_to_fp32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
- void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q4_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- #ifdef __ARM_FEATURE_DOTPROD
- const int32x4_t mzero = vdupq_n_s32(0);
- #endif
- float sumf = 0;
- int8x16x2_t q4bytes;
- int8x16x4_t q8bytes;
- float sum_mins = 0.f;
- uint16_t aux16[2];
- const uint8_t * restrict scales = (const uint8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const uint16_t * restrict a = (const uint16_t *)x[i].scales;
- aux16[0] = a[0] & 0x0f0f;
- aux16[1] = (a[0] >> 4) & 0x0f0f;
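- // For QK_K == 64 there are only two 4-bit scales and two 4-bit mins, packed into the
- // first two bytes of x[i].scales; after the nibble split, scales[0..1] are the scales
- // and scales[2..3] the mins used with the four 16-wide bsums below.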
- const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
- sum_mins += y[i].d * (float)x[i].d[1] * summi;
- const float d = y[i].d * (float)x[i].d[0];
- const uint8x16x2_t q4bits = vld1q_u8_x2(q4);
- #ifdef __ARM_FEATURE_DOTPROD
- q8bytes = vld1q_s8_x4(q8);
- q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
- q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
- const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
- const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
- q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
- q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
- const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
- const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
- #else
- q8bytes = vld1q_s8_x4(q8);
- q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
- q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
- const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- int32_t sumi1 = vaddvq_s16(vaddq_s16(p0, p1)) * scales[0];
- q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
- q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[2])));
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[3])));
- int32_t sumi2 = vaddvq_s16(vaddq_s16(p2, p3)) * scales[1];
- #endif
- sumf += d * (sumi1 + sumi2);
- }
- *s = sumf - sum_mins;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0;
- uint16_t aux16[2];
- const uint8_t * scales = (const uint8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = ggml_fp16_to_fp32(x[i].d[0]) * y[i].d;
- const float m = ggml_fp16_to_fp32(x[i].d[1]) * y[i].d;
- const __m256 vd = _mm256_set1_ps(d);
- const uint16_t * a = (const uint16_t *)x[i].scales;
- aux16[0] = a[0] & 0x0f0f;
- aux16[1] = (a[0] >> 4) & 0x0f0f;
- summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
- const __m256i q4l = _mm256_and_si256(q4bits, m4);
- const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
- const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
- const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
- const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
- const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
- }
- *s = hsum_float_8(acc) - summs;
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0;
- uint16_t aux16[2];
- const uint8_t * scales = (const uint8_t *)aux16;
- for (int i = 0; i < nb; ++i) {
- const float d = ggml_fp16_to_fp32(x[i].d[0]) * y[i].d;
- const float m = ggml_fp16_to_fp32(x[i].d[1]) * y[i].d;
- const __m256 vd = _mm256_set1_ps(d);
- const uint16_t * a = (const uint16_t *)x[i].scales;
- aux16[0] = a[0] & 0x0f0f;
- aux16[1] = (a[0] >> 4) & 0x0f0f;
- summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
- const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
- const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
- const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
- const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
- const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
- const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
- const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
- const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
- const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
- const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
- const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
- const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
- const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
- }
- *s = hsum_float_8(acc) - summs;
- #else
- uint8_t aux8[QK_K];
- int16_t aux16[16];
- float sums [8];
- memset(sums, 0, 8*sizeof(float));
- uint16_t s16[2];
- const uint8_t * restrict scales = (const uint8_t *)s16;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- uint8_t * restrict a = aux8;
- for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
- for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
- const uint16_t * restrict b = (const uint16_t *)x[i].scales;
- s16[0] = b[0] & 0x0f0f;
- s16[1] = (b[0] >> 4) & 0x0f0f;
- sumf -= y[i].d * ggml_fp16_to_fp32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d[0]);
- for (int j = 0; j < QK_K/32; ++j) {
- for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
- q8 += 16; a += 16;
- for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
- q8 += 16; a += 16;
- const float dl = d * scales[j];
- for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
- }
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
- #if QK_K == 256
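- // q5_K super-blocks pack each quant as 4 low bits in qs plus 1 high bit in qh;
- // 8 six-bit scales and 8 six-bit mins are packed into the 12-byte scales field.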
- void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q5_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- static const uint32_t kmask1 = 0x3f3f3f3f;
- static const uint32_t kmask2 = 0x0f0f0f0f;
- static const uint32_t kmask3 = 0x03030303;
- uint32_t utmp[4];
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- const uint8x16_t mone = vdupq_n_u8(1);
- const uint8x16_t mtwo = vdupq_n_u8(2);
- #if defined(__ARM_FEATURE_DOTPROD)
- const int32x4_t mzero = vdupq_n_s32(0);
- #endif
- int8x16x4_t q5bytes;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
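- // unpack the 12 packed scale bytes into 8 six-bit scales (utmp[0..1]) and 8 six-bit mins (utmp[2..3])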
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
- const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
- const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
- vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
- int32_t sumi_mins = vaddvq_s32(prod);
- const uint8_t * scales = (const uint8_t *)utmp;
- const uint8_t * restrict q5 = x[i].qs;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- uint8x16x2_t qhbits = vld1q_u8_x2(qh);
- uint8x16x4_t q5h;
- int32_t sumi = 0;
- for (int j = 0; j < QK_K/64; ++j) {
- const uint8x16x2_t q5bits = vld1q_u8_x2(q5); q5 += 32;
- const int8x16x4_t q8bytes = vld1q_s8_x4(q8); q8 += 64;
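- // pull the next two high bits per quant out of qhbits and position them at bit 4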
- q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
- q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
- q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
- q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
- qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
- qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
- q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
- q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
- q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
- q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
- sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
- #else
- const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- sumi += vaddvq_s16(vaddq_s16(p0, p1)) * *scales++;
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- sumi += vaddvq_s16(vaddq_s16(p2, p3)) * *scales++;
- #endif
- }
- sumf += d * sumi - dmin * sumi_mins;
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m128i mzero = _mm_setzero_si128();
- const __m256i mone = _mm256_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0.f;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
- const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
- const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
- const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
- summs += dmin * _mm_extract_epi32(hsum, 0);
- const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
- const __m256i scales = MM256_SET_M128I(sc128, sc128);
- const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
- __m256i hmask = mone;
- __m256i sumi = _mm256_setzero_si256();
- int bit = 0;
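- // hbits holds the high bits for all 256 quants; hmask/bit step through one bit plane per 32-value group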
- for (int j = 0; j < QK_K/64; ++j) {
- const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
- const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
- const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
- const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
- const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
- const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
- hmask = _mm256_slli_epi16(hmask, 1);
- const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
- const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
- const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
- hmask = _mm256_slli_epi16(hmask, 1);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
- p16_0 = _mm256_madd_epi16(scale_0, p16_0);
- p16_1 = _mm256_madd_epi16(scale_1, p16_1);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
- }
- __m256 vd = _mm256_set1_ps(d);
- acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i mzero = _mm_setzero_si128();
- const __m128i mone = _mm_set1_epi8(1);
- const __m128i m2 = _mm_set1_epi8(2);
- __m256 acc = _mm256_setzero_ps();
- float summs = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
- const __m128i scales = _mm_cvtepu8_epi16(utmps);
- const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
- const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
- const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
- const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
- const __m128i prod = _mm_madd_epi16(mins, q8s);
- const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
- summs += dmin * _mm_extract_epi32(hsum, 0);
- const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
- const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
- __m128i hmask = mone;
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- int bit = 0;
- __m128i shuffle = _mm_set1_epi16(0x0100);
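- // shuffle broadcasts one 16-bit scale to all lanes; adding m2 steps to the next scale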
- for (int j = 0; j < QK_K/64; ++j) {
- const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi16(shuffle, m2);
- const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
- const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
- __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
- __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
- __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
- __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
- __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
- __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
- hmask = _mm_slli_epi16(hmask, 1);
- __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
- __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
- p16_0 = _mm_madd_epi16(scale_0, p16_0);
- p16_1 = _mm_madd_epi16(scale_0, p16_1);
- q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
- q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
- q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
- q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
- q5_0 = _mm_add_epi8(q5l_0, q5h_0);
- q5_1 = _mm_add_epi8(q5l_1, q5h_1);
- hmask = _mm_slli_epi16(hmask, 1);
- q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
- __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
- p16_2 = _mm_madd_epi16(scale_1, p16_2);
- p16_3 = _mm_madd_epi16(scale_1, p16_3);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
- }
- __m256 vd = _mm256_set1_ps(d);
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc) + summs;
- #else
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const uint8_t * restrict hm = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = ggml_fp16_to_fp32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
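- // QK_K == 64 variant: the quant value is (4 low bits) + 16*(high bit) - 16,
- // i.e. a signed value in [-16, 15], with four signed 8-bit scales per
- // super-block and no separate mins.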
- void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q5_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- const uint8x16_t m4b = vdupq_n_u8(0xf);
- const uint8x16_t mh = vdupq_n_u8(16);
- #if defined(__ARM_FEATURE_DOTPROD)
- const int32x4_t mzero = vdupq_n_s32(0);
- #endif
- int8x16x4_t q5bytes;
- uint8x16x4_t q5h;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * (float)x[i].d;
- const int8_t * sc = x[i].scales;
- const uint8_t * restrict q5 = x[i].qs;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const uint8x8_t qhbits = vld1_u8(qh);
- const uint8x16x2_t q5bits = vld1q_u8_x2(q5);
- const int8x16x4_t q8bytes = vld1q_s8_x4(q8);
- const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
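- // vbicq(mh, x) = 16 & ~x: q5h is 16 where the high bit is clear, so subtracting it centers the quant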
- q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
- q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
- q5h.val[2] = vbicq_u8(mh, htmp);
- q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
- q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
- q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
- q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
- q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- int32_t sumi1 = sc[0] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
- int32_t sumi2 = sc[1] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
- int32_t sumi3 = sc[2] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
- int32_t sumi4 = sc[3] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
- sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
- #else
- const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- int32_t sumi = sc[0] * vaddvq_s16(p0) + sc[1] * vaddvq_s16(p1);
- const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- sumi += sc[2] * vaddvq_s16(p2) + sc[3] * vaddvq_s16(p3);
- sumf += d*sumi;
- #endif
- }
- *s = sumf;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i mone = _mm256_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
- const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
- const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
- int64_t aux64;
- memcpy(&aux64, x[i].qh, 8);
- const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
- const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
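- // q5h_* below is 16 where the high bit is clear and 0 where it is set;
- // subtracting the s16 terms from the p16 terms then yields the centered 5-bit value times q8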
- const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
- const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
- const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
- const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
- const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
- const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
- const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
- const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
- acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i mone = _mm_set1_epi8(1);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q5 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
- const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
- const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
- const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
- const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
- int64_t aux64;
- memcpy(&aux64, x[i].qh, 8);
- const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
- const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
- const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
- const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
- const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
- const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
- const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
- const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
- const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
- const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
- const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
- const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
- const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
- const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
- const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
- const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
- const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
- const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
- const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
- }
- *s = hsum_float_8(acc);
- #else
- int8_t aux8[QK_K];
- int16_t aux16[16];
- float sums [8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].qs;
- const uint8_t * restrict hm = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- int8_t * restrict a = aux8;
- for (int l = 0; l < 32; ++l) {
- a[l+ 0] = q4[l] & 0xF;
- a[l+32] = q4[l] >> 4;
- }
- for (int is = 0; is < 8; ++is) {
- uint8_t m = 1 << is;
- for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
- }
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const int8_t * restrict sc = x[i].scales;
- for (int j = 0; j < QK_K/16; ++j) {
- const float dl = d * sc[j];
- for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
- q8 += 16; a += 16;
- }
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
- #if QK_K == 256
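- // q6_K: each quant is 4 low bits in ql plus 2 high bits in qh, offset by 32;
- // sixteen signed 8-bit scales cover the 16 groups of 16 quants.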
- void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q6_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- float sum = 0;
- const uint8x16_t m4b = vdupq_n_u8(0xF);
- #if defined(__ARM_FEATURE_DOTPROD)
- const int32x4_t vzero = vdupq_n_s32(0);
- #endif
- // the implicit -32 offset of the quants is not subtracted per element;
- // it is folded into the final (isum - 32 * isum_mins) correction
- const uint8x16_t mone = vdupq_n_u8(3);
- int8x16x4_t q6bytes;
- uint8x16x4_t q6h;
- for (int i = 0; i < nb; ++i) {
- const float d_all = ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q6 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const int8_t * restrict scale = x[i].scales;
- const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums);
- const int8x16_t scales = vld1q_s8(scale);
- const int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))};
- const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
- vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
- vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
- vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
- int32_t isum_mins = vaddvq_s32(prod);
- int32_t isum = 0;
- for (int j = 0; j < QK_K/128; ++j) {
- uint8x16x2_t qhbits = vld1q_u8_x2(qh); qh += 32;
- uint8x16x4_t q6bits = vld1q_u8_x4(q6); q6 += 64;
- int8x16x4_t q8bytes = vld1q_s8_x4(q8); q8 += 64;
- q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
- q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
- uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
- q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[1], 2);
- q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- // -32 offset deferred: accounted for by the (isum - 32 * isum_mins) correction after the loop
- q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
- q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
- q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
- q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
- scale += 4;
- #else
- int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
- scale += 2;
- int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1];
- scale += 2;
- #endif
- q8bytes = vld1q_s8_x4(q8); q8 += 64;
- shifted = vshrq_n_u8(qhbits.val[0], 4);
- q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[1], 4);
- q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[0], 6);
- q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits.val[1], 6);
- q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
- q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
- q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
- q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
- #if defined(__ARM_FEATURE_DOTPROD)
- isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
- scale += 4;
- #else
- p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
- scale += 2;
- p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1];
- scale += 2;
- #endif
- }
- // each of the 256 quants carries an implicit -32, hence the 32 * isum_mins correction
- sum += d_all * y[i].d * (isum - 32 * isum_mins);
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i m2 = _mm256_set1_epi8(3);
- const __m256i m32s = _mm256_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- __m256i sumi = _mm256_setzero_si256();
- int is = 0;
- for (int j = 0; j < QK_K/128; ++j) {
- const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
- const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
- const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
- const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
- is += 4;
- const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
- const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
- const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
- const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
- const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
- const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
- const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
- const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
- const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
- const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
- const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
- const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
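- // q8s_* = 32 * (pairwise sums of q8); subtracting it from p16_* applies the implicit -32 quant offset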
- __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
- __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
- __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
- __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
- __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
- __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
- __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
- p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
- p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
- p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
- }
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m3 = _mm_set1_epi8(3);
- const __m128i m32s = _mm_set1_epi8(32);
- const __m128i m2 = _mm_set1_epi8(2);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
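- // shuffle broadcasts scales[2j] into the low 8 bytes and scales[2j+1] into the high 8; adding m2 steps to the next pair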
- for (int j = 0; j < QK_K/128; ++j) {
- const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
- const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
- const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
- const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
- const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
- const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
- const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
- const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
- const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
- const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
- const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
- const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
- const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
- const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
- const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
- const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
- const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
- const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
- const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
- const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
- __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
- __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
- __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
- __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
- __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
- __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
- __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
- __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
- __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
- __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
- __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
- __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
- __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
- __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
- __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
- __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- p16_4 = _mm_sub_epi16(p16_4, q8s_4);
- p16_5 = _mm_sub_epi16(p16_5, q8s_5);
- p16_6 = _mm_sub_epi16(p16_6, q8s_6);
- p16_7 = _mm_sub_epi16(p16_7, q8s_7);
- const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
- shuffle = _mm_add_epi8(shuffle, m2);
- p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
- p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
- p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
- p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
- p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
- p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
- p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
- }
- __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
- }
- *s = hsum_float_8(acc);
- #else
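- // scalar fallback: expand each 128-quant chunk into aux8 with the -32 offset applied up front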
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #else
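- // QK_K == 64 variant: a single 64-quant block per super-block, same 6-bit
- // layout, with the -32 offset applied directly while unpacking.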
- void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- assert(n % QK_K == 0);
- const block_q6_K * restrict x = vx;
- const block_q8_K * restrict y = vy;
- const int nb = n / QK_K;
- #ifdef __ARM_NEON
- float sum = 0;
- const uint8x16_t m4b = vdupq_n_u8(0xF);
- const int8x16_t m32s = vdupq_n_s8(32);
- #if defined(__ARM_FEATURE_DOTPROD)
- const int32x4_t vzero = vdupq_n_s32(0);
- #endif
- const uint8x16_t mone = vdupq_n_u8(3);
- int8x16x4_t q6bytes;
- uint8x16x4_t q6h;
- for (int i = 0; i < nb; ++i) {
- const float d_all = (float)x[i].d;
- const uint8_t * restrict q6 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const int8_t * restrict scale = x[i].scales;
- int32_t isum = 0;
- uint8x16_t qhbits = vld1q_u8(qh);
- uint8x16x2_t q6bits = vld1q_u8_x2(q6);
- int8x16x4_t q8bytes = vld1q_s8_x4(q8);
- q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
- uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
- q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits, 4);
- q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- shifted = vshrq_n_u8(qhbits, 6);
- q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
- q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
- q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
- q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
- q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
- #if defined(__ARM_FEATURE_DOTPROD)
- isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
- vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
- #else
- int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
- vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
- int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
- vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
- isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
- int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
- vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
- int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
- vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
- isum += vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
- #endif
- sum += isum * d_all * y[i].d;
- }
- *s = sum;
- #elif defined __AVX2__
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i m2 = _mm256_set1_epi8(3);
- const __m256i m32s = _mm256_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
- const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
- const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
- const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
- __m256i sumi = _mm256_setzero_si256();
- const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
- const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
- const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
- const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
- const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
- const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
- const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
- const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
- __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
- __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
- __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
- p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
- p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
- sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
- acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
- }
- *s = hsum_float_8(acc);
- #elif defined __AVX__
- const __m128i m4 = _mm_set1_epi8(0xF);
- const __m128i m2 = _mm_set1_epi8(3);
- const __m128i m32s = _mm_set1_epi8(32);
- __m256 acc = _mm256_setzero_ps();
- for (int i = 0; i < nb; ++i) {
- const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
- const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
- const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
- const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
- __m128i sumi_0 = _mm_setzero_si128();
- __m128i sumi_1 = _mm_setzero_si128();
- const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
- const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
- const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
- const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
- const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
- const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
- const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
- const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
- const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
- const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
- const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
- const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
- const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
- const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
- __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
- __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
- __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
- __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
- __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
- __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
- __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
- __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
- p16_0 = _mm_sub_epi16(p16_0, q8s_0);
- p16_1 = _mm_sub_epi16(p16_1, q8s_1);
- p16_2 = _mm_sub_epi16(p16_2, q8s_2);
- p16_3 = _mm_sub_epi16(p16_3, q8s_3);
- p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
- p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
- p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
- p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
- sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
- sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
- acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
- }
- *s = hsum_float_8(acc);
- #else
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * restrict q4 = x[i].ql;
- const uint8_t * restrict qh = x[i].qh;
- const int8_t * restrict q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * restrict a = aux8;
- for (int l = 0; l < 16; ++l) {
- a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
- #endif
- }
- #endif
|