// Defines CLOCK_MONOTONIC on Linux
#define _GNU_SOURCE

#include "ggml.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>

// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
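// example: with the fallback above, a file-scope `static_assert(sizeof(int) >= 4, "int too small");`
// expands to a harmless `struct global_scope_noop_trick;` declaration, i.e. the check becomes a no-op.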
#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int* ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int* ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int* ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int* ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t* out, void* unused, thread_ret_t(*func)(void*), void* arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void* unused) {
    (void) unused;
    return (int) WaitForSingleObject(thread, INFINITE);
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void* thread_ret_t;
#endif
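// Illustrative usage sketch (kept disabled): the shims above mirror just enough of the pthread and
// stdatomic APIs that the same worker code compiles on both Windows and POSIX. The names
// example_worker/example_spawn_join are hypothetical and only show the intended usage.
#if 0
static atomic_int g_example_counter;

static thread_ret_t example_worker(void * arg) {
    (void) arg;
    atomic_fetch_add(&g_example_counter, 1); // InterlockedExchangeAdd on Windows, C11 atomics elsewhere
    return 0;
}

static void example_spawn_join(void) {
    pthread_t threads[4];
    atomic_store(&g_example_counter, 0);
    for (int i = 0; i < 4; ++i) {
        pthread_create(&threads[i], NULL, example_worker, NULL); // CreateThread on Windows
    }
    for (int i = 0; i < 4; ++i) {
        pthread_join(threads[i], NULL); // WaitForSingleObject on Windows
    }
    // all four increments are visible after the joins
    assert(atomic_load(&g_example_counter) == 4);
}
#endif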
// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

#ifdef __HAIKU__
#define static_assert(cond, msg) _Static_assert(cond, msg)
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_SILU_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if UINTPTR_MAX == 0xFFFFFFFF
    #define GGML_MEM_ALIGN 4
#else
    #define GGML_MEM_ALIGN 16
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size)  _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)     _aligned_free(ptr)
#else
inline static void* ggml_aligned_malloc(size_t size) {
    void* aligned_memory = NULL;
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
    if (result != 0) {
        // Handle allocation failure
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size)  ggml_aligned_malloc(size)
#define GGML_ALIGNED_FREE(ptr)     free(ptr)
#endif
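// Illustrative usage sketch (kept disabled): GGML_ALIGNED_MALLOC/GGML_ALIGNED_FREE are a matched pair.
// On MSVC/MinGW the memory comes from _aligned_malloc() and must be released with _aligned_free(),
// while the posix_memalign() path is released with plain free() -- hence two macros instead of one.
#if 0
static void example_aligned_alloc(void) {
    void * buf = GGML_ALIGNED_MALLOC(1024); // 1024 bytes, GGML_MEM_ALIGN-byte aligned
    if (buf == NULL) {
        return; // allocation failure
    }
    // ... use buf ...
    GGML_ALIGNED_FREE(buf);
}
#endif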
#define UNUSED(x) (void)(x)
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)
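// Illustrative example: GGML_ASSERT() is a release-mode assert -- on failure it prints the file,
// line and stringified condition to stderr and calls abort(), e.g.
//
//   GGML_ASSERT(ptr != NULL);   // on failure: "GGML_ASSERT: ggml.c:<line>: ptr != NULL"
//
// Unlike assert(), it is not compiled out when NDEBUG is defined.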
#ifdef GGML_USE_ACCELERATE
#include <Accelerate/Accelerate.h>
#elif GGML_USE_OPENBLAS
#include <cblas.h>
#endif

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// floating point type used to accumulate sums
typedef double ggml_float;

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#include <immintrin.h>
#endif
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16
static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
  232. static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
  233. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
  234. const float scale_to_inf = 0x1.0p+112f;
  235. const float scale_to_zero = 0x1.0p-110f;
  236. #else
  237. const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
  238. const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
  239. #endif
  240. float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
  241. const uint32_t w = fp32_to_bits(f);
  242. const uint32_t shl1_w = w + w;
  243. const uint32_t sign = w & UINT32_C(0x80000000);
  244. uint32_t bias = shl1_w & UINT32_C(0xFF000000);
  245. if (bias < UINT32_C(0x71000000)) {
  246. bias = UINT32_C(0x71000000);
  247. }
  248. base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
  249. const uint32_t bits = fp32_to_bits(base);
  250. const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
  251. const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
  252. const uint32_t nonsign = exp_bits + mantissa_bits;
  253. return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
  254. }
  255. #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
  256. #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
  257. #endif // __F16C__
  258. #endif // __ARM_NEON
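// Worked example of the binary16 layout handled above (1 sign bit, 5 exponent bits with
// bias 15, 10 mantissa bits): 1.0f is 0x3C00 (sign 0, exponent 01111 = 15, mantissa 0),
// and the largest finite half-precision value is 0x7BFF = 65504.0f.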
  259. //
  260. // global data
  261. //
  262. // precomputed gelu table for f16 (128 KB)
  263. static ggml_fp16_t table_gelu_f16[1 << 16];
  264. // precomputed silu table for f16 (128 KB)
  265. static ggml_fp16_t table_silu_f16[1 << 16];
  266. // precomputed exp table for f16 (128 KB)
  267. static ggml_fp16_t table_exp_f16[1 << 16];
  268. // precomputed f32 table for f16 (256 KB)
  269. static float table_f32_f16[1 << 16];
  270. // On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
  271. // so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
  272. // This is also true for POWER9.
  273. #if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
  274. inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
  275. uint16_t s;
  276. memcpy(&s, &f, sizeof(uint16_t));
  277. return table_f32_f16[s];
  278. }
  279. #define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
  280. #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
  281. #endif
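// The lookup path above requires table_f32_f16 to be populated once before use; in ggml
// this is done at context initialization. A minimal sketch of such a fill, guarded by a
// hypothetical GGML_EXAMPLE_SNIPPETS flag so it is not built by default:
#ifdef GGML_EXAMPLE_SNIPPETS
static void ggml_example_fill_f16_table(void) {
    for (uint32_t i = 0; i < (1u << 16); ++i) {
        const uint16_t u = (uint16_t) i;
        ggml_fp16_t h;
        memcpy(&h, &u, sizeof(h));                      // reinterpret the bit pattern as fp16
        table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(h);
    }
}
#endif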
  282. // note: do not use these inside ggml.c
  283. // these are meant to be used via the ggml.h API
  284. float ggml_fp16_to_fp32(ggml_fp16_t x) {
  285. return (float) GGML_FP16_TO_FP32(x);
  286. }
  287. ggml_fp16_t ggml_fp32_to_fp16(float x) {
  288. return GGML_FP32_TO_FP16(x);
  289. }
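// Round-trip sketch for the two wrappers above: storing an f32 as f16 and loading it back
// is lossy (roughly 3 decimal digits of precision). Hypothetical check, not built by default:
#ifdef GGML_EXAMPLE_SNIPPETS
static void ggml_example_fp16_round_trip(void) {
    const float x = 0.1f;
    const float y = ggml_fp16_to_fp32(ggml_fp32_to_fp16(x));
    GGML_ASSERT(fabsf(x - y) < 1e-3f);  // 0.1f maps to 0x2E66 ~= 0.0999756f
}
#endif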
  290. //
  291. // timing
  292. //
  293. #if defined(_MSC_VER) || defined(__MINGW32__)
  294. static int64_t timer_freq;
  295. void ggml_time_init(void) {
  296. LARGE_INTEGER frequency;
  297. QueryPerformanceFrequency(&frequency);
  298. timer_freq = frequency.QuadPart;
  299. }
  300. int64_t ggml_time_ms(void) {
  301. LARGE_INTEGER t;
  302. QueryPerformanceCounter(&t);
  303. return (t.QuadPart * 1000) / timer_freq;
  304. }
  305. int64_t ggml_time_us(void) {
  306. LARGE_INTEGER t;
  307. QueryPerformanceCounter(&t);
  308. return (t.QuadPart * 1000000) / timer_freq;
  309. }
  310. #else
  311. void ggml_time_init(void) {}
  312. int64_t ggml_time_ms(void) {
  313. struct timespec ts;
  314. clock_gettime(CLOCK_MONOTONIC, &ts);
  315. return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
  316. }
  317. int64_t ggml_time_us(void) {
  318. struct timespec ts;
  319. clock_gettime(CLOCK_MONOTONIC, &ts);
  320. return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
  321. }
  322. #endif
  323. int64_t ggml_cycles(void) {
  324. return clock();
  325. }
  326. int64_t ggml_cycles_per_ms(void) {
  327. return CLOCKS_PER_SEC/1000;
  328. }
  329. #ifdef GGML_PERF
  330. #define ggml_perf_time_ms() ggml_time_ms()
  331. #define ggml_perf_time_us() ggml_time_us()
  332. #define ggml_perf_cycles() ggml_cycles()
  333. #define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
  334. #else
  335. #define ggml_perf_time_ms() 0
  336. #define ggml_perf_time_us() 0
  337. #define ggml_perf_cycles() 0
  338. #define ggml_perf_cycles_per_ms() 0
  339. #endif
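// Usage sketch for the timing helpers above (hypothetical helper, not built by default):
#ifdef GGML_EXAMPLE_SNIPPETS
static int64_t ggml_example_time_us(void (*fn)(void)) {
    ggml_time_init();                   // no-op on POSIX; initializes timer_freq on Windows
    const int64_t t0 = ggml_time_us();
    fn();
    return ggml_time_us() - t0;         // elapsed wall-clock time in microseconds
}
#endif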
  340. //
  341. // cache line
  342. //
  343. #if defined(__cpp_lib_hardware_interference_size)
  344. #define CACHE_LINE_SIZE hardware_destructive_interference_size
  345. #else
  346. #if defined(__POWER9_VECTOR__)
  347. #define CACHE_LINE_SIZE 128
  348. #else
  349. #define CACHE_LINE_SIZE 64
  350. #endif
  351. #endif
  352. static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
  353. //
  354. // quantization
  355. //
  356. // AVX routines provided by GH user Const-me
  357. // ref: https://github.com/ggerganov/ggml/pull/27#issuecomment-1464934600
  358. #if __AVX2__ || __AVX512F__
  359. // Unpack 32 4-bit fields into 32 bytes
  360. // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
  361. static inline __m256i bytesFromNibbles( const uint8_t* rsi )
  362. {
  363. // Load 16 bytes from memory
  364. __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi );
  365. // Expand bytes into uint16_t values
  366. __m256i bytes = _mm256_cvtepu8_epi16( tmp );
  367. // Unpack values into individual bytes
  368. const __m256i lowMask = _mm256_set1_epi8( 0xF );
  369. __m256i high = _mm256_andnot_si256( lowMask, bytes );
  370. __m256i low = _mm256_and_si256( lowMask, bytes );
  371. high = _mm256_slli_epi16( high, 4 );
  372. bytes = _mm256_or_si256( low, high );
  373. return bytes;
  374. }
  375. static inline __m128i packNibbles( __m256i bytes )
  376. {
  377. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  378. const __m256i lowByte = _mm256_set1_epi16( 0xFF );
  379. __m256i high = _mm256_andnot_si256( lowByte, bytes );
  380. __m256i low = _mm256_and_si256( lowByte, bytes );
  381. high = _mm256_srli_epi16( high, 4 );
  382. bytes = _mm256_or_si256( low, high );
  383. // Compress uint16_t lanes into bytes
  384. __m128i r0 = _mm256_castsi256_si128( bytes );
  385. __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
  386. return _mm_packus_epi16( r0, r1 );
  387. }
  388. #elif __AVX__
  389. static inline __m128i bytesFromNibbles( const uint8_t* rsi )
  390. {
  391. // Load 8 bytes from memory
  392. __m128i tmp = _mm_loadu_si64( ( const __m128i* )rsi );
  393. // Expand bytes into uint16_t values
  394. __m128i bytes = _mm_cvtepu8_epi16( tmp );
  395. // Unpack values into individual bytes
  396. const __m128i lowMask = _mm_set1_epi8( 0xF );
  397. __m128i high = _mm_andnot_si128( lowMask, bytes );
  398. __m128i low = _mm_and_si128( lowMask, bytes );
  399. high = _mm_slli_epi16( high, 4 );
  400. bytes = _mm_or_si128( low, high );
  401. return bytes;
  402. }
  403. static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
  404. {
  405. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  406. const __m128i lowByte = _mm_set1_epi16( 0xFF );
  407. __m128i high = _mm_andnot_si128( lowByte, bytes1 );
  408. __m128i low = _mm_and_si128( lowByte, bytes1 );
  409. high = _mm_srli_epi16( high, 4 );
  410. bytes1 = _mm_or_si128( low, high );
  411. high = _mm_andnot_si128( lowByte, bytes2 );
  412. low = _mm_and_si128( lowByte, bytes2 );
  413. high = _mm_srli_epi16( high, 4 );
  414. bytes2 = _mm_or_si128( low, high );
  415. return _mm_packus_epi16( bytes1, bytes2);
  416. }
  417. #endif
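// Nibble layout used by the helpers above and by the quantized formats below: each byte
// stores two 4-bit values, low nibble first (e.g. 0xA3 unpacks to 3, then 10). Scalar
// reference for illustration (hypothetical helper, not built by default):
#ifdef GGML_EXAMPLE_SNIPPETS
static void ggml_example_unpack_nibbles(const uint8_t * src, uint8_t * dst, int n_bytes) {
    for (int i = 0; i < n_bytes; ++i) {
        dst[2*i + 0] = src[i] & 0x0F;   // low nibble  -> even position
        dst[2*i + 1] = src[i] >> 4;     // high nibble -> odd position
    }
}
#endif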
  418. #if __ARM_NEON
  419. #if !defined(__aarch64__)
  420. inline static uint16_t vaddvq_u8(uint8x16_t v) {
  421. return
  422. (uint16_t)vgetq_lane_u8(v, 0) + (uint16_t)vgetq_lane_u8(v, 1) +
  423. (uint16_t)vgetq_lane_u8(v, 2) + (uint16_t)vgetq_lane_u8(v, 3) +
  424. (uint16_t)vgetq_lane_u8(v, 4) + (uint16_t)vgetq_lane_u8(v, 5) +
  425. (uint16_t)vgetq_lane_u8(v, 6) + (uint16_t)vgetq_lane_u8(v, 7) +
  426. (uint16_t)vgetq_lane_u8(v, 8) + (uint16_t)vgetq_lane_u8(v, 9) +
  427. (uint16_t)vgetq_lane_u8(v, 10) + (uint16_t)vgetq_lane_u8(v, 11) +
  428. (uint16_t)vgetq_lane_u8(v, 12) + (uint16_t)vgetq_lane_u8(v, 13) +
  429. (uint16_t)vgetq_lane_u8(v, 14) + (uint16_t)vgetq_lane_u8(v, 15);
  430. }
  431. inline static int32_t vaddvq_s16(int16x8_t v) {
  432. return
  433. (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
  434. (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
  435. (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
  436. (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
  437. }
  438. inline static uint32_t vaddvq_u16(uint16x8_t v) {
  439. return
  440. (uint32_t)vgetq_lane_u16(v, 0) + (uint32_t)vgetq_lane_u16(v, 1) +
  441. (uint32_t)vgetq_lane_u16(v, 2) + (uint32_t)vgetq_lane_u16(v, 3) +
  442. (uint32_t)vgetq_lane_u16(v, 4) + (uint32_t)vgetq_lane_u16(v, 5) +
  443. (uint32_t)vgetq_lane_u16(v, 6) + (uint32_t)vgetq_lane_u16(v, 7);
  444. }
  445. inline static int32_t vaddvq_s32(int32x4_t v) {
  446. return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
  447. }
  448. inline static float vaddvq_f32(float32x4_t v) {
  449. return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
  450. }
inline static float vminvq_f32(float32x4_t v) {
    return
        MIN(MIN(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MIN(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}
inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}
// vzip1/vzip2 exist only on AArch64; emulate them with the ARMv7 vzip intrinsic,
// which returns both interleaved halves at once
inline static int8x8_t vzip1_s8(int8x8_t a, int8x8_t b) {
    return vzip_s8(a, b).val[0];
}
inline static int8x8_t vzip2_s8(int8x8_t a, int8x8_t b) {
    return vzip_s8(a, b).val[1];
}
inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    return vzip_u8(a, b).val[0];
}
inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    return vzip_u8(a, b).val[1];
}
  473. #endif
  474. #endif
  475. #define QK4_0 32
  476. typedef struct {
  477. float d; // delta
  478. uint8_t qs[QK4_0 / 2]; // nibbles / quants
  479. } block_q4_0;
  480. static_assert(sizeof(block_q4_0) == sizeof(float) + QK4_0 / 2, "wrong q4_0 block size/padding");
  481. #define QK4_1 32
  482. typedef struct {
  483. float d; // delta
  484. float m; // min
  485. uint8_t qs[QK4_1 / 2]; // nibbles / quants
  486. } block_q4_1;
  487. static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
  488. #define QK4_2 16
  489. typedef struct {
  490. ggml_fp16_t d; // delta
  491. uint8_t qs[QK4_2 / 2]; // nibbles / quants
  492. } block_q4_2;
  493. static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");
  494. #define QK8_0 32
  495. typedef struct {
  496. float d; // delta
  497. int8_t qs[QK8_0]; // quants
  498. } block_q8_0;
  499. static_assert(sizeof(block_q8_0) == sizeof(float) + QK8_0, "wrong q8_0 block size/padding");
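// Storage cost per block (and per weight) implied by the layouts above:
//   block_q4_0: 4 + 16     = 20 bytes for 32 values -> 5.0 bits/value
//   block_q4_1: 4 + 4 + 16 = 24 bytes for 32 values -> 6.0 bits/value
//   block_q4_2: 2 + 8      = 10 bytes for 16 values -> 5.0 bits/value
//   block_q8_0: 4 + 32     = 36 bytes for 32 values -> 9.0 bits/value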
  500. // reference implementation for deterministic creation of model files
  501. static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
  502. assert(k % QK4_0 == 0);
  503. const int nb = k / QK4_0;
  504. uint8_t pp[QK4_0/2];
  505. for (int i = 0; i < nb; i++) {
  506. float amax = 0.0f; // absolute max
  507. for (int l = 0; l < QK4_0; l++) {
  508. const float v = x[i*QK4_0 + l];
  509. amax = MAX(amax, fabsf(v));
  510. }
  511. const float d = amax / ((1 << 3) - 1);
  512. const float id = d ? 1.0f/d : 0.0f;
  513. y[i].d = d;
  514. for (int l = 0; l < QK4_0; l += 2) {
  515. const float v0 = x[i*QK4_0 + l + 0]*id;
  516. const float v1 = x[i*QK4_0 + l + 1]*id;
  517. const uint8_t vi0 = (int8_t)roundf(v0) + 8;
  518. const uint8_t vi1 = (int8_t)roundf(v1) + 8;
  519. assert(vi0 < 16);
  520. assert(vi1 < 16);
  521. pp[l/2] = vi0 | (vi1 << 4);
  522. }
  523. memcpy(y[i].qs, pp, sizeof(pp));
  524. }
  525. }
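// Worked example for one q4_0 block: if amax = 1.4 then d = 1.4/7 = 0.2; an input
// x = 0.43 quantizes to round(0.43/0.2) + 8 = 2 + 8 = 10 and dequantizes back to
// (10 - 8)*0.2 = 0.4, i.e. the error per value is bounded by d/2.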
  526. static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int k) {
  527. assert(k % QK4_0 == 0);
  528. const int nb = k / QK4_0;
  529. block_q4_0 * restrict y = vy;
  530. #if defined(__POWER9_VECTOR__)
  531. const vector float v85 = vec_splats(8.5f);
  532. for (int i = 0; i < nb; i++) {
  533. float amax = 0.0f; // absolute max
  534. vector float srcv [8];
  535. vector float asrcv[8];
  536. vector float amaxv[8];
  537. for (int l = 0; l < 8; l++) srcv[l] = *(vector float *)(x + i*32 + 4*l);
  538. for (int l = 0; l < 8; l++) asrcv[l] = vec_abs(srcv[l]);
  539. for (int l = 0; l < 4; l++) amaxv[2*l] = vec_max(asrcv[2*l], asrcv[2*l+1]);
  540. //for (int l = 0; l < 2; l++) amaxv[4*l] = vec_max(amaxv[4*l], amaxv[4*l+2]);
  541. amaxv[0] = vec_max(amaxv[0], amaxv[2]);
  542. amaxv[4] = vec_max(amaxv[4], amaxv[6]);
  543. //for (int l = 0; l < 1; l++) amaxv[8*l] = vec_max(amaxv[8*l], amaxv[8*l+4]);
  544. amaxv[0] = vec_max(amaxv[0], amaxv[4]);
  545. amax = MAX(
  546. MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)),
  547. MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3)));
  548. const float d = amax / ((1 << 3) - 1);
  549. const float id = d ? 1.0/d : 0.0;
  550. y[i].d = d;
  551. const vector float vid = vec_splats(id);
  552. uint8_t * restrict pb = y[i].qs;
  553. for (int l = 0; l < 8; l++) {
  554. const vector float vf = vec_madd(srcv[l], vid, v85);
  555. const vector signed int vi = vec_signed(vf);
  556. pb[2*l + 0] = vec_extract(vi, 0) | (vec_extract(vi, 1) << 4);
  557. pb[2*l + 1] = vec_extract(vi, 2) | (vec_extract(vi, 3) << 4);
  558. }
  559. }
  560. #elif __ARM_NEON
  561. for (int i = 0; i < nb; i++) {
  562. float32x4_t srcv [8];
  563. float32x4_t asrcv[8];
  564. float32x4_t amaxv[8];
  565. for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l);
  566. for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]);
  567. for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]);
  568. for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]);
  569. for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]);
  570. const float amax = vmaxvq_f32(amaxv[0]);
  571. const float d = amax / ((1 << 3) - 1);
  572. const float id = d ? 1.0f/d : 0.0f;
  573. y[i].d = d;
  574. for (int l = 0; l < 8; l++) {
  575. const float32x4_t v = vmulq_n_f32(srcv[l], id);
  576. const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f));
  577. const int32x4_t vi = vcvtq_s32_f32(vf);
  578. y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4);
  579. y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4);
  580. }
  581. }
  582. #elif defined(__AVX2__)
  583. for (int i = 0; i < nb; i++) {
  584. // Load elements into 4 AVX vectors
  585. __m256 v0 = _mm256_loadu_ps( x );
  586. __m256 v1 = _mm256_loadu_ps( x + 8 );
  587. __m256 v2 = _mm256_loadu_ps( x + 16 );
  588. __m256 v3 = _mm256_loadu_ps( x + 24 );
  589. x += 32;
  590. // Compute max(abs(e)) for the block
  591. const __m256 signBit = _mm256_set1_ps( -0.0f );
  592. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  593. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  594. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  595. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  596. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  597. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  598. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  599. const float maxScalar = _mm_cvtss_f32( max4 );
  600. // Quantize these floats
  601. const float d = maxScalar / 7.0f;
  602. y[i].d = d;
  603. const float id = ( maxScalar != 0.0f ) ? 7.0f / maxScalar : 0.0f;
  604. const __m256 mul = _mm256_set1_ps( id );
  605. // Apply the multiplier
  606. v0 = _mm256_mul_ps( v0, mul );
  607. v1 = _mm256_mul_ps( v1, mul );
  608. v2 = _mm256_mul_ps( v2, mul );
  609. v3 = _mm256_mul_ps( v3, mul );
  610. // Round to nearest integer
  611. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  612. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  613. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  614. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  615. // Convert floats to integers
  616. __m256i i0 = _mm256_cvtps_epi32( v0 );
  617. __m256i i1 = _mm256_cvtps_epi32( v1 );
  618. __m256i i2 = _mm256_cvtps_epi32( v2 );
  619. __m256i i3 = _mm256_cvtps_epi32( v3 );
  620. // Convert int32 to int16
  621. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  622. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  623. // Convert int16 to int8
  624. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  625. // We got our precious signed bytes, but the order is now wrong
  626. // These AVX2 pack instructions process 16-byte pieces independently
  627. // The following instruction is fixing the order
  628. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  629. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  630. // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ]
  631. const __m256i off = _mm256_set1_epi8( 8 );
  632. i0 = _mm256_add_epi8( i0, off );
  633. // Compress the vector into 4 bit/value, and store
  634. __m128i res = packNibbles( i0 );
  635. _mm_storeu_si128( ( __m128i* )y[i].qs, res );
  636. }
  637. #elif defined(__AVX__)
  638. for (int i = 0; i < nb; i++) {
  639. // Load elements into 4 AVX vectors
  640. __m256 v0 = _mm256_loadu_ps( x );
  641. __m256 v1 = _mm256_loadu_ps( x + 8 );
  642. __m256 v2 = _mm256_loadu_ps( x + 16 );
  643. __m256 v3 = _mm256_loadu_ps( x + 24 );
  644. x += 32;
  645. // Compute max(abs(e)) for the block
  646. const __m256 signBit = _mm256_set1_ps( -0.0f );
  647. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  648. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  649. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  650. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  651. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  652. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  653. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  654. const float maxScalar = _mm_cvtss_f32( max4 );
  655. // Quantize these floats
  656. const float d = maxScalar / 7.0f;
  657. y[i].d = d;
  658. const float id = ( maxScalar != 0.0f ) ? 7.0f / maxScalar : 0.0f;
  659. const __m256 mul = _mm256_set1_ps( id );
  660. // Apply the multiplier
  661. v0 = _mm256_mul_ps( v0, mul );
  662. v1 = _mm256_mul_ps( v1, mul );
  663. v2 = _mm256_mul_ps( v2, mul );
  664. v3 = _mm256_mul_ps( v3, mul );
  665. // Round to nearest integer
  666. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  667. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  668. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  669. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  670. // Convert floats to integers
  671. __m256i i0 = _mm256_cvtps_epi32( v0 );
  672. __m256i i1 = _mm256_cvtps_epi32( v1 );
  673. __m256i i2 = _mm256_cvtps_epi32( v2 );
  674. __m256i i3 = _mm256_cvtps_epi32( v3 );
        // AVX does not provide the needed integer pack/permute instructions,
        // so split the 256-bit registers into 128-bit halves and use the SSE equivalents
  677. __m128i ni0 = _mm256_castsi256_si128( i0 );
  678. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  679. __m128i ni2 = _mm256_castsi256_si128( i1 );
  680. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  681. __m128i ni4 = _mm256_castsi256_si128( i2 );
  682. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  683. __m128i ni6 = _mm256_castsi256_si128( i3 );
  684. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  685. // Convert int32 to int16
  686. ni0 = _mm_packs_epi32( ni0, ni1 );
  687. ni2 = _mm_packs_epi32( ni2, ni3 );
  688. ni4 = _mm_packs_epi32( ni4, ni5 );
  689. ni6 = _mm_packs_epi32( ni6, ni7 );
  690. // Convert int16 to int8
  691. ni0 = _mm_packs_epi16( ni0, ni2 );
  692. ni4 = _mm_packs_epi16( ni4, ni6 );
  693. // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ]
  694. const __m128i off = _mm_set1_epi8( 8);
  695. ni0 = _mm_add_epi8( ni0, off );
  696. ni4 = _mm_add_epi8( ni4, off );
  697. // Compress the vector into 4 bit/value, and store
  698. __m128i res = packNibbles( ni0, ni4 );
  699. _mm_storeu_si128( ( __m128i* )y[i].qs, res );
  700. }
  701. #elif defined(__wasm_simd128__)
  702. for (int i = 0; i < nb; i++) {
  703. float amax = 0.0f; // absolute max
  704. v128_t srcv [8];
  705. v128_t asrcv[8];
  706. v128_t amaxv[8];
  707. for (int l = 0; l < 8; l++) srcv[l] = wasm_v128_load(x + i*32 + 4*l);
  708. for (int l = 0; l < 8; l++) asrcv[l] = wasm_f32x4_abs(srcv[l]);
  709. for (int l = 0; l < 4; l++) amaxv[2*l] = wasm_f32x4_max(asrcv[2*l], asrcv[2*l+1]);
  710. for (int l = 0; l < 2; l++) amaxv[4*l] = wasm_f32x4_max(amaxv[4*l], amaxv[4*l+2]);
  711. for (int l = 0; l < 1; l++) amaxv[8*l] = wasm_f32x4_max(amaxv[8*l], amaxv[8*l+4]);
  712. amax = MAX(
  713. MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)),
  714. MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3)));
  715. const float d = amax / ((1 << 3) - 1);
  716. const float id = d ? 1.0/d : 0.0;
  717. y[i].d = d;
  718. for (int l = 0; l < 8; l++) {
  719. const v128_t v = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id));
  720. const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f));
  721. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf);
  722. y[i].qs[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4);
  723. y[i].qs[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4);
  724. }
  725. }
  726. #else
  727. // scalar
  728. quantize_row_q4_0_reference(x, y, k);
  729. #endif
  730. }
  731. static void quantize_row_q4_1_reference(const float * restrict x, void * restrict vy, int k) {
  732. assert(k % QK4_1 == 0);
  733. const int nb = k / QK4_1;
  734. block_q4_1 * restrict y = vy;
  735. uint8_t pp[QK4_1/2];
  736. for (int i = 0; i < nb; i++) {
  737. float min = FLT_MAX;
  738. float max = -FLT_MAX;
  739. for (int l = 0; l < QK4_1; l++) {
  740. const float v = x[i*QK4_1 + l];
  741. if (v < min) min = v;
  742. if (v > max) max = v;
  743. }
  744. const float d = (max - min) / ((1 << 4) - 1);
  745. const float id = d ? 1.0f/d : 0.0f;
  746. y[i].d = d;
  747. y[i].m = min;
  748. for (int l = 0; l < QK4_1; l += 2) {
  749. const float v0 = (x[i*QK4_1 + l + 0] - min)*id;
  750. const float v1 = (x[i*QK4_1 + l + 1] - min)*id;
  751. const uint8_t vi0 = roundf(v0);
  752. const uint8_t vi1 = roundf(v1);
  753. assert(vi0 < 16);
  754. assert(vi1 < 16);
  755. pp[l/2] = vi0 | (vi1 << 4);
  756. }
  757. memcpy(y[i].qs, pp, sizeof(pp));
  758. }
  759. }
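// Worked example for one q4_1 block: with min = -0.3 and max = 1.2, d = 1.5/15 = 0.1 and
// m = -0.3; an input x = 0.52 quantizes to round((0.52 - (-0.3))/0.1) = 8 and dequantizes
// back to 8*0.1 + (-0.3) = 0.5.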
  760. static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) {
  761. assert(k % QK4_1 == 0);
  762. const int nb = k / QK4_1;
  763. block_q4_1 * restrict y = vy;
  764. #if defined(__AVX2__)
  765. for (int i = 0; i < nb; i++) {
  766. // Load elements into 4 AVX vectors
  767. __m256 v0 = _mm256_loadu_ps( x );
  768. __m256 v1 = _mm256_loadu_ps( x + 8 );
  769. __m256 v2 = _mm256_loadu_ps( x + 16 );
  770. __m256 v3 = _mm256_loadu_ps( x + 24 );
  771. x += 32;
  772. // Compute max for the block
  773. __m256 vmax;
  774. vmax = _mm256_max_ps( v0, v1 );
  775. vmax = _mm256_max_ps( vmax, v2 );
  776. vmax = _mm256_max_ps( vmax, v3 );
  777. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( vmax, 1 ), _mm256_castps256_ps128( vmax ) );
  778. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  779. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  780. const float maxScalar = _mm_cvtss_f32( max4 );
  781. // Compute min for the block
  782. __m256 vmin;
  783. vmin = _mm256_min_ps( v0, v1 );
  784. vmin = _mm256_min_ps( vmin, v2 );
  785. vmin = _mm256_min_ps( vmin, v3 );
  786. __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( vmin, 1 ), _mm256_castps256_ps128( vmin ) );
  787. min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) );
  788. min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) );
  789. const float minScalar = _mm_cvtss_f32( min4 );
  790. // Quantize these floats
  791. const float d = (maxScalar - minScalar) / ((1 << 4) - 1);
  792. const float id = d ? 1.0f/d : 0.0f;
  793. y[i].m = minScalar;
  794. y[i].d = d;
  795. // x = (x-min)*id
  796. const __m256 mul = _mm256_set1_ps( id );
  797. const __m256 off = _mm256_set1_ps( minScalar );
  798. v0 = _mm256_mul_ps( _mm256_sub_ps( v0, off ), mul );
  799. v1 = _mm256_mul_ps( _mm256_sub_ps( v1, off ), mul );
  800. v2 = _mm256_mul_ps( _mm256_sub_ps( v2, off ), mul );
  801. v3 = _mm256_mul_ps( _mm256_sub_ps( v3, off ), mul );
  802. // Round to nearest integer
  803. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  804. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  805. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  806. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  807. // Convert floats to integers
  808. __m256i i0 = _mm256_cvtps_epi32( v0 );
  809. __m256i i1 = _mm256_cvtps_epi32( v1 );
  810. __m256i i2 = _mm256_cvtps_epi32( v2 );
  811. __m256i i3 = _mm256_cvtps_epi32( v3 );
  812. // Convert int32 to int16
  813. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  814. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  815. // Convert int16 to int8
  816. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  817. // We got our precious signed bytes, but the order is now wrong
  818. // These AVX2 pack instructions process 16-byte pieces independently
  819. // The following instruction is fixing the order
  820. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  821. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  822. // Compress the vector into 4 bit/value, and store
  823. __m128i res = packNibbles( i0 );
  824. _mm_storeu_si128( ( __m128i* )y[i].qs, res );
  825. }
  826. #elif __ARM_NEON
  827. for (int i = 0; i < nb; i++) {
  828. float32x4_t srcv[8];
  829. float32x4_t minv[8];
  830. float32x4_t maxv[8];
  831. for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*QK4_1 + 4*l);
  832. for (int l = 0; l < 4; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]);
  833. for (int l = 0; l < 2; l++) minv[4*l] = vminq_f32(minv[4*l], minv[4*l + 2]);
  834. for (int l = 0; l < 1; l++) minv[8*l] = vminq_f32(minv[8*l], minv[8*l + 4]);
  835. for (int l = 0; l < 4; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l + 1]);
  836. for (int l = 0; l < 2; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l + 2]);
  837. for (int l = 0; l < 1; l++) maxv[8*l] = vmaxq_f32(maxv[8*l], maxv[8*l + 4]);
  838. const float min = vminvq_f32(minv[0]);
  839. const float max = vmaxvq_f32(maxv[0]);
  840. const float d = (max - min) / ((1 << 4) - 1);
  841. const float id = d ? 1.0f/d : 0.0f;
  842. y[i].d = d;
  843. y[i].m = min;
  844. const float32x4_t minv0 = vdupq_n_f32(min);
  845. for (int l = 0; l < 8; l++) {
  846. const float32x4_t v = vmulq_n_f32(vsubq_f32(srcv[l], minv0), id);
  847. const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(0.5f)); // needed to round to nearest
  848. const int32x4_t vi = vcvtq_s32_f32(vf);
  849. y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4);
  850. y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4);
  851. }
  852. }
  853. #else
  854. // scalar
  855. quantize_row_q4_1_reference(x, vy, k);
  856. #endif
  857. }
  858. // reference implementation for deterministic creation of model files
  859. static void quantize_row_q4_2_reference(const float * restrict x, block_q4_2 * restrict y, int k) {
  860. assert(k % QK4_2 == 0);
  861. const int nb = k / QK4_2;
  862. for (int i = 0; i < nb; i++) {
  863. float amax = 0.0f; // absolute max
  864. for (int l = 0; l < QK4_2; l++) {
  865. const float v = x[i*QK4_2 + l];
  866. amax = MAX(amax, fabsf(v));
  867. }
  868. const float d = amax / ((1 << 3) - 1);
  869. const float id = d ? 1.0f/d : 0.0f;
  870. y[i].d = GGML_FP32_TO_FP16(d);
  871. for (int l = 0; l < QK4_2; l += 2) {
  872. const float v0 = x[i*QK4_2 + l + 0]*id;
  873. const float v1 = x[i*QK4_2 + l + 1]*id;
  874. const uint8_t vi0 = (uint8_t)(v0 + 8.5f);
  875. const uint8_t vi1 = (uint8_t)(v1 + 8.5f);
  876. assert(vi0 < 16);
  877. assert(vi1 < 16);
  878. y[i].qs[l/2] = vi0 | (vi1 << 4);
  879. }
  880. }
  881. }
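// The "+ 8.5f then truncate" above is round-half-up combined with the +8 offset:
// e.g. v0 = -3.2 gives (uint8_t)(5.3f) = 5, which matches round(-3.2) + 8 = 5.
// Unlike q4_0/q4_1, the per-block scale d is stored as fp16 (GGML_FP32_TO_FP16).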
  882. static void quantize_row_q4_2(const float * restrict x, void * restrict vy, int k) {
  883. assert(k % QK4_2 == 0);
  884. block_q4_2 * restrict y = vy;
  885. quantize_row_q4_2_reference(x, y, k);
  886. }
  887. // reference implementation for deterministic creation of model files
  888. static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
  889. assert(k % QK8_0 == 0);
  890. const int nb = k / QK8_0;
  891. for (int i = 0; i < nb; i++) {
  892. float amax = 0.0f; // absolute max
  893. for (int l = 0; l < QK8_0; l++) {
  894. const float v = x[i*QK8_0 + l];
  895. amax = MAX(amax, fabsf(v));
  896. }
  897. const float d = amax / ((1 << 7) - 1);
  898. const float id = d ? 1.0f/d : 0.0f;
  899. y[i].d = d;
  900. for (int l = 0; l < QK8_0; ++l) {
  901. const float v = x[i*QK8_0 + l]*id;
  902. y[i].qs[l] = roundf(v);
  903. }
  904. }
  905. }
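// Worked example for one q8_0 block: if amax = 6.35 then d = 6.35/127 = 0.05; an input
// x = 1.23 quantizes to round(1.23/0.05) = 25 and dequantizes back to 25*0.05 = 1.25.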
  906. static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
  907. assert(k % QK8_0 == 0);
  908. const int nb = k / QK8_0;
  909. block_q8_0 * restrict y = vy;
  910. #if defined(__ARM_NEON)
  911. for (int i = 0; i < nb; i++) {
  912. float32x4_t srcv [8];
  913. float32x4_t asrcv[8];
  914. float32x4_t amaxv[8];
  915. for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l);
  916. for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]);
  917. for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]);
  918. for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]);
  919. for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]);
  920. const float amax = vmaxvq_f32(amaxv[0]);
  921. const float d = amax / ((1 << 7) - 1);
  922. const float id = d ? 1.0f/d : 0.0f;
  923. y[i].d = d;
  924. for (int l = 0; l < 8; l++) {
  925. const float32x4_t v = vmulq_n_f32(srcv[l], id);
  926. const int32x4_t vi = vcvtnq_s32_f32(v);
  927. y[i].qs[4*l + 0] = vgetq_lane_s32(vi, 0);
  928. y[i].qs[4*l + 1] = vgetq_lane_s32(vi, 1);
  929. y[i].qs[4*l + 2] = vgetq_lane_s32(vi, 2);
  930. y[i].qs[4*l + 3] = vgetq_lane_s32(vi, 3);
  931. }
  932. }
  933. #elif defined(__AVX2__) || defined(__AVX__)
  934. for (int i = 0; i < nb; i++) {
  935. // Load elements into 4 AVX vectors
  936. __m256 v0 = _mm256_loadu_ps( x );
  937. __m256 v1 = _mm256_loadu_ps( x + 8 );
  938. __m256 v2 = _mm256_loadu_ps( x + 16 );
  939. __m256 v3 = _mm256_loadu_ps( x + 24 );
  940. x += 32;
  941. // Compute max(abs(e)) for the block
  942. const __m256 signBit = _mm256_set1_ps( -0.0f );
  943. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  944. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  945. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  946. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  947. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  948. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  949. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  950. const float maxScalar = _mm_cvtss_f32( max4 );
  951. // Quantize these floats
  952. const float d = maxScalar / 127.f;
  953. y[i].d = d;
  954. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  955. const __m256 mul = _mm256_set1_ps( id );
  956. // Apply the multiplier
  957. v0 = _mm256_mul_ps( v0, mul );
  958. v1 = _mm256_mul_ps( v1, mul );
  959. v2 = _mm256_mul_ps( v2, mul );
  960. v3 = _mm256_mul_ps( v3, mul );
  961. // Round to nearest integer
  962. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  963. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  964. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  965. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  966. // Convert floats to integers
  967. __m256i i0 = _mm256_cvtps_epi32( v0 );
  968. __m256i i1 = _mm256_cvtps_epi32( v1 );
  969. __m256i i2 = _mm256_cvtps_epi32( v2 );
  970. __m256i i3 = _mm256_cvtps_epi32( v3 );
  971. #if defined(__AVX2__)
  972. // Convert int32 to int16
  973. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  974. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  975. // Convert int16 to int8
  976. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  977. // We got our precious signed bytes, but the order is now wrong
  978. // These AVX2 pack instructions process 16-byte pieces independently
  979. // The following instruction is fixing the order
  980. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  981. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  982. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  983. #else
        // AVX does not provide the needed integer pack/permute instructions,
        // so split the 256-bit registers into 128-bit halves and use the SSE equivalents
  986. __m128i ni0 = _mm256_castsi256_si128( i0 );
  987. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  988. __m128i ni2 = _mm256_castsi256_si128( i1 );
  989. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  990. __m128i ni4 = _mm256_castsi256_si128( i2 );
  991. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  992. __m128i ni6 = _mm256_castsi256_si128( i3 );
  993. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  994. // Convert int32 to int16
  995. ni0 = _mm_packs_epi32( ni0, ni1 );
  996. ni2 = _mm_packs_epi32( ni2, ni3 );
  997. ni4 = _mm_packs_epi32( ni4, ni5 );
  998. ni6 = _mm_packs_epi32( ni6, ni7 );
  999. // Convert int16 to int8
  1000. ni0 = _mm_packs_epi16( ni0, ni2 );
  1001. ni4 = _mm_packs_epi16( ni4, ni6 );
  1002. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  1003. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  1004. #endif
  1005. }
  1006. #else
  1007. // scalar
  1008. quantize_row_q8_0_reference(x, y, k);
  1009. #endif
  1010. }
  1011. static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) {
  1012. assert(k % QK4_0 == 0);
  1013. const int nb = k / QK4_0;
  1014. const block_q4_0 * restrict x = vx;
  1015. #if defined(__AVX2__)
  1016. for (int i = 0; i < nb; i++) {
  1017. // scale factor
  1018. const __m256 d_v = _mm256_broadcast_ss(&x[i].d);
  1019. const uint8_t * restrict pp = x[i].qs;
  1020. for (int l = 0; l < QK4_0; l += 32) {
  1021. // Load 32x4-bit integers into 32x8-bit integers
  1022. __m256i vx8 = bytesFromNibbles(pp+l/2);
  1023. // Subtract 8 from the integers
  1024. vx8 = _mm256_sub_epi8(vx8, _mm256_set1_epi8(8));
  1025. // Convert to 16-bit int
  1026. const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
  1027. const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));
  1028. // Convert to 32-bit int -> float 32
  1029. const __m256 vf[4] = {
  1030. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
  1031. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
  1032. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
  1033. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
  1034. };
  1035. // Scale and store
  1036. for (int j = 0; j < 4; j++) {
  1037. const __m256 result = _mm256_mul_ps(vf[j], d_v);
  1038. _mm256_storeu_ps(y + i * QK4_0 + l + j*8, result);
  1039. }
  1040. }
  1041. }
  1042. #elif defined(__ARM_NEON)
  1043. for (int i = 0; i < nb; i++) {
  1044. const float32x4_t vd = vdupq_n_f32(x[i].d);
  1045. const uint8_t * restrict pp = x[i].qs;
  1046. for (int l = 0; l < QK4_0; l += 16) {
  1047. // Load 16x4-bit integers into 8x8-bit integers
  1048. const uint8x8_t v8 = vld1_u8(pp + l/2);
  1049. // Expand 4-bit qs to 8-bit bytes
  1050. const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f));
  1051. const uint8x8_t v1 = vshr_n_u8(v8, 4);
  1052. // Convert to signed 8-bit integers
  1053. const int8x8_t vs_0 = vreinterpret_s8_u8(v0);
  1054. const int8x8_t vs_1 = vreinterpret_s8_u8(v1);
  1055. // Subtract 8 from each byte
  1056. const int8x8_t vb_0 = vsub_s8(vs_0, vdup_n_s8(8));
  1057. const int8x8_t vb_1 = vsub_s8(vs_1, vdup_n_s8(8));
  1058. // Interleave and combine
  1059. const int8x8_t vx_0 = vzip1_s8(vb_0, vb_1);
  1060. const int8x8_t vx_1 = vzip2_s8(vb_0, vb_1);
  1061. const int8x16_t vq = vcombine_s8(vx_0, vx_1);
  1062. // convert to 2x int16x8_t
  1063. const int16x8_t vi_0 = vmovl_s8(vget_low_s8 (vq));
  1064. const int16x8_t vi_1 = vmovl_s8(vget_high_s8(vq));
  1065. // convert to 4x float32x4_t
  1066. const float32x4_t vf_0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_0)));
  1067. const float32x4_t vf_1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_0)));
  1068. const float32x4_t vf_2 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_1)));
  1069. const float32x4_t vf_3 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_1)));
  1070. // Multiply by d
  1071. const float32x4_t r0 = vmulq_f32(vf_0, vd);
  1072. const float32x4_t r1 = vmulq_f32(vf_1, vd);
  1073. const float32x4_t r2 = vmulq_f32(vf_2, vd);
  1074. const float32x4_t r3 = vmulq_f32(vf_3, vd);
  1075. // Store
  1076. vst1q_f32(y + i*QK4_0 + l + 0, r0);
  1077. vst1q_f32(y + i*QK4_0 + l + 4, r1);
  1078. vst1q_f32(y + i*QK4_0 + l + 8, r2);
  1079. vst1q_f32(y + i*QK4_0 + l + 12, r3);
  1080. }
  1081. }
  1082. #else
  1083. // scalar
  1084. for (int i = 0; i < nb; i++) {
  1085. const float d = x[i].d;
  1086. const uint8_t * restrict pp = x[i].qs;
  1087. for (int l = 0; l < QK4_0; l += 2) {
  1088. const uint8_t vi = pp[l/2];
  1089. const int8_t vi0 = vi & 0xf;
  1090. const int8_t vi1 = vi >> 4;
  1091. const float v0 = (vi0 - 8)*d;
  1092. const float v1 = (vi1 - 8)*d;
  1093. //printf("d = %f, vi = %d, vi0 = %d, vi1 = %d, v0 = %f, v1 = %f\n", d, vi, vi0, vi1, v0, v1);
  1094. y[i*QK4_0 + l + 0] = v0;
  1095. y[i*QK4_0 + l + 1] = v1;
  1096. assert(!isnan(y[i*QK4_0 + l + 0]));
  1097. assert(!isnan(y[i*QK4_0 + l + 1]));
  1098. }
  1099. }
  1100. #endif
  1101. }
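// Round-trip sketch tying quantize_row_q4_0_reference and dequantize_row_q4_0 together;
// the reconstruction error per element is bounded by d/2. Hypothetical check, not built
// by default:
#ifdef GGML_EXAMPLE_SNIPPETS
static void ggml_example_q4_0_round_trip(void) {
    float src[QK4_0];
    float dst[QK4_0];
    block_q4_0 blk;
    for (int l = 0; l < QK4_0; ++l) {
        src[l] = 0.25f*(l - 16);        // test data in [-4.0, 3.75]
    }
    quantize_row_q4_0_reference(src, &blk, QK4_0);
    dequantize_row_q4_0(&blk, dst, QK4_0);
    for (int l = 0; l < QK4_0; ++l) {
        GGML_ASSERT(fabsf(src[l] - dst[l]) <= 0.5f*blk.d + 1e-6f);
    }
}
#endif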
  1102. static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, int k) {
  1103. assert(k % QK4_1 == 0);
  1104. const int nb = k / QK4_1;
  1105. const block_q4_1 * restrict x = vx;
  1106. #if defined(__AVX2__)
  1107. for (int i = 0; i < nb; i++) {
  1108. const __m256 d_v = _mm256_broadcast_ss(&x[i].d);
  1109. const __m256 d_m = _mm256_broadcast_ss(&x[i].m);
  1110. const uint8_t * restrict pp = x[i].qs;
  1111. for (int l = 0; l < QK4_1; l += 32) {
  1112. // Load 32x4-bit integers into 32x8-bit integers
  1113. __m256i vx8 = bytesFromNibbles(pp+l/2);
  1114. // Convert to 16-bit int
  1115. const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
  1116. const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));
  1117. // Convert to 32-bit int -> float 32
  1118. const __m256 vf[4] = {
  1119. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
  1120. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
  1121. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
  1122. _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
  1123. };
  1124. // Scale, add m and store
  1125. for (int j = 0; j < 4; j++) {
  1126. const __m256 result = _mm256_add_ps(_mm256_mul_ps(vf[j], d_v), d_m);
  1127. _mm256_storeu_ps(y + i * QK4_1 + l + j*8, result);
  1128. }
  1129. }
  1130. }
  1131. #elif defined(__ARM_NEON)
  1132. for (int i = 0; i < nb; i++) {
  1133. const float32x4_t vd = vdupq_n_f32(x[i].d);
  1134. const float32x4_t vm = vdupq_n_f32(x[i].m);
  1135. const uint8_t * restrict pp = x[i].qs;
  1136. for (int l = 0; l < QK4_1; l += 16) {
  1137. // Load 16x4-bit integers into 8x8-bit integers
  1138. const uint8x8_t v8 = vld1_u8(pp + l/2);
  1139. // Expand 4-bit qs to 8-bit bytes
  1140. const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f));
  1141. const uint8x8_t v1 = vshr_n_u8(v8, 4);
  1142. // Interleave and combine
  1143. const uint8x8_t vx_0 = vzip1_u8(v0, v1);
  1144. const uint8x8_t vx_1 = vzip2_u8(v0, v1);
  1145. const uint8x16_t vq = vcombine_u8(vx_0, vx_1);
  1146. // convert to 2x uint16x8_t
  1147. const uint16x8_t vi_0 = vmovl_u8(vget_low_u8 (vq));
  1148. const uint16x8_t vi_1 = vmovl_u8(vget_high_u8(vq));
  1149. // convert to 4x float32x4_t
  1150. const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0)));
  1151. const float32x4_t vf_1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_0)));
  1152. const float32x4_t vf_2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_1)));
  1153. const float32x4_t vf_3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_1)));
  1154. // multiply by d and add m
  1155. const float32x4_t r0 = vmlaq_f32(vm, vf_0, vd);
  1156. const float32x4_t r1 = vmlaq_f32(vm, vf_1, vd);
  1157. const float32x4_t r2 = vmlaq_f32(vm, vf_2, vd);
  1158. const float32x4_t r3 = vmlaq_f32(vm, vf_3, vd);
  1159. // Store
  1160. vst1q_f32(y + i*QK4_1 + l + 0, r0);
  1161. vst1q_f32(y + i*QK4_1 + l + 4, r1);
  1162. vst1q_f32(y + i*QK4_1 + l + 8, r2);
  1163. vst1q_f32(y + i*QK4_1 + l + 12, r3);
  1164. }
  1165. }
  1166. #else
  1167. for (int i = 0; i < nb; i++) {
  1168. const float d = x[i].d;
  1169. const float m = x[i].m;
  1170. const uint8_t * restrict pp = x[i].qs;
  1171. for (int l = 0; l < QK4_1; l += 2) {
  1172. const uint8_t vi = pp[l/2];
  1173. const int8_t vi0 = vi & 0xf;
  1174. const int8_t vi1 = vi >> 4;
  1175. const float v0 = vi0*d + m;
  1176. const float v1 = vi1*d + m;
  1177. y[i*QK4_1 + l + 0] = v0;
  1178. y[i*QK4_1 + l + 1] = v1;
  1179. assert(!isnan(y[i*QK4_1 + l + 0]));
  1180. assert(!isnan(y[i*QK4_1 + l + 1]));
  1181. }
  1182. }
  1183. #endif
  1184. }
  1185. static void dequantize_row_q4_2(const void * restrict vx, float * restrict y, int k) {
  1186. assert(k % QK4_2 == 0);
  1187. const int nb = k / QK4_2;
  1188. const block_q4_2 * restrict x = vx;
  1189. for (int i = 0; i < nb; i++) {
  1190. const float d = GGML_FP16_TO_FP32(x[i].d);
  1191. const uint8_t * restrict pp = x[i].qs;
  1192. for (int l = 0; l < QK4_2; l += 2) {
  1193. const uint8_t vi = pp[l/2];
  1194. const int8_t vi0 = vi & 0xf;
  1195. const int8_t vi1 = vi >> 4;
  1196. const float v0 = (vi0 - 8)*d;
  1197. const float v1 = (vi1 - 8)*d;
  1198. y[i*QK4_2 + l + 0] = v0;
  1199. y[i*QK4_2 + l + 1] = v1;
  1200. assert(!isnan(y[i*QK4_2 + l + 0]));
  1201. assert(!isnan(y[i*QK4_2 + l + 1]));
  1202. }
  1203. }
  1204. }
  1205. static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1206. static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1207. //static void ggml_vec_dot_q4_1_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1208. static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1209. static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = {
  1210. [GGML_TYPE_Q4_0] = {
  1211. .dequantize_row_q = dequantize_row_q4_0,
  1212. .quantize_row_q = quantize_row_q4_0,
  1213. .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_0_reference,
  1214. .quantize_row_q_dot = quantize_row_q8_0,
  1215. .vec_dot_q = ggml_vec_dot_q4_0_q8_0,
  1216. },
  1217. [GGML_TYPE_Q4_1] = {
  1218. .dequantize_row_q = dequantize_row_q4_1,
  1219. .quantize_row_q = quantize_row_q4_1,
  1220. .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_1_reference,
  1221. .quantize_row_q_dot = quantize_row_q4_1,
  1222. .vec_dot_q = ggml_vec_dot_q4_1,
  1223. },
  1224. [GGML_TYPE_Q4_2] = {
  1225. .dequantize_row_q = dequantize_row_q4_2,
  1226. .quantize_row_q = quantize_row_q4_2,
  1227. .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_2_reference,
  1228. .quantize_row_q_dot = quantize_row_q8_0,
  1229. .vec_dot_q = ggml_vec_dot_q4_2_q8_0,
  1230. },
  1231. // TODO: GGML_TYPE_Q8_0
  1232. };
  1233. // For internal test use
  1234. quantize_fns_t ggml_internal_get_quantize_fn(size_t i) {
  1235. GGML_ASSERT(i < GGML_TYPE_COUNT);
  1236. return quantize_fns[i];
  1237. }
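// Usage sketch for the dispatch table: run a row through the q4_0 entry via the function
// pointers rather than calling the routines directly (hypothetical helper; n must be a
// multiple of QK4_0 and qbuf must hold n/QK4_0 block_q4_0 structs):
#ifdef GGML_EXAMPLE_SNIPPETS
static void ggml_example_quantize_via_table(const float * src, void * qbuf, float * dst, int n) {
    const quantize_fns_t fns = ggml_internal_get_quantize_fn(GGML_TYPE_Q4_0);
    fns.quantize_row_q  (src, qbuf, n);   // f32 -> q4_0 blocks
    fns.dequantize_row_q(qbuf, dst, n);   // q4_0 blocks -> f32
}
#endif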
  1238. //
  1239. // simd mappings
  1240. //
  1241. // we define a common set of C macros which map to specific intrinsics based on the current architecture
  1242. // we then implement the fundamental computation operations below using only these macros
// adding support for a new architecture requires defining the corresponding SIMD macros
  1244. //
  1245. // GGML_F32_STEP / GGML_F16_STEP
  1246. // number of elements to process in a single step
  1247. //
  1248. // GGML_F32_EPR / GGML_F16_EPR
  1249. // number of elements to fit in a single register
  1250. //
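//
// as an example, the f32 dot product implemented below is expressed with these macros
// roughly as follows:
//
//   float res = 0.0f;
//   GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
//   GGML_F32_VEC ax [GGML_F32_ARR];
//   GGML_F32_VEC ay [GGML_F32_ARR];
//   for (int i = 0; i < n - n % GGML_F32_STEP; i += GGML_F32_STEP) {
//       for (int j = 0; j < GGML_F32_ARR; j++) {
//           ax[j]  = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
//           ay[j]  = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
//           sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
//       }
//   }
//   GGML_F32_VEC_REDUCE(res, sum);   // horizontal sum of all accumulators
//   // leftover elements (n % GGML_F32_STEP) are handled with plain scalar code
//
// GGML_F32_ARR = GGML_F32_STEP/GGML_F32_EPR accumulators are kept in flight to hide latency
//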
  1251. #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
  1252. #define GGML_SIMD
  1253. // F32 NEON
  1254. #define GGML_F32_STEP 16
  1255. #define GGML_F32_EPR 4
  1256. #define GGML_F32x4 float32x4_t
  1257. #define GGML_F32x4_ZERO vdupq_n_f32(0.0f)
  1258. #define GGML_F32x4_SET1(x) vdupq_n_f32(x)
  1259. #define GGML_F32x4_LOAD vld1q_f32
  1260. #define GGML_F32x4_STORE vst1q_f32
  1261. #define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
  1262. #define GGML_F32x4_ADD vaddq_f32
  1263. #define GGML_F32x4_MUL vmulq_f32
  1264. #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
  1265. #define GGML_F32x4_REDUCE(res, x) \
  1266. { \
  1267. for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
  1268. x[2*i] = vaddq_f32(x[2*i], x[2*i+1]); \
  1269. } \
  1270. for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
  1271. x[4*i] = vaddq_f32(x[4*i], x[4*i+2]); \
  1272. } \
  1273. for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
  1274. x[8*i] = vaddq_f32(x[8*i], x[8*i+4]); \
  1275. } \
  1276. res = GGML_F32x4_REDUCE_ONE(x[0]); \
  1277. }
  1278. #define GGML_F32_VEC GGML_F32x4
  1279. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1280. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1281. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1282. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1283. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1284. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1285. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1286. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1287. // F16 NEON
  1288. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  1289. #define GGML_F16_STEP 32
  1290. #define GGML_F16_EPR 8
  1291. #define GGML_F16x8 float16x8_t
  1292. #define GGML_F16x8_ZERO vdupq_n_f16(0.0f)
  1293. #define GGML_F16x8_SET1(x) vdupq_n_f16(x)
  1294. #define GGML_F16x8_LOAD vld1q_f16
  1295. #define GGML_F16x8_STORE vst1q_f16
  1296. #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
  1297. #define GGML_F16x8_ADD vaddq_f16
  1298. #define GGML_F16x8_MUL vmulq_f16
  1299. #define GGML_F16x8_REDUCE(res, x) \
  1300. { \
  1301. for (int i = 0; i < GGML_F16_ARR/2; ++i) { \
  1302. x[2*i] = vaddq_f16(x[2*i], x[2*i+1]); \
  1303. } \
  1304. for (int i = 0; i < GGML_F16_ARR/4; ++i) { \
  1305. x[4*i] = vaddq_f16(x[4*i], x[4*i+2]); \
  1306. } \
  1307. for (int i = 0; i < GGML_F16_ARR/8; ++i) { \
  1308. x[8*i] = vaddq_f16(x[8*i], x[8*i+4]); \
  1309. } \
  1310. const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
  1311. const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
  1312. res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
  1313. }
  1314. #define GGML_F16_VEC GGML_F16x8
  1315. #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
  1316. #define GGML_F16_VEC_SET1 GGML_F16x8_SET1
  1317. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p)
  1318. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
  1319. #define GGML_F16_VEC_FMA GGML_F16x8_FMA
  1320. #define GGML_F16_VEC_ADD GGML_F16x8_ADD
  1321. #define GGML_F16_VEC_MUL GGML_F16x8_MUL
  1322. #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE
  1323. #else
  1324. // if FP16 vector arithmetic is not supported, we use FP32 instead
  1325. // and take advantage of the vcvt_ functions to convert to/from FP16
  1326. #define GGML_F16_STEP 16
  1327. #define GGML_F16_EPR 4
  1328. #define GGML_F32Cx4 float32x4_t
  1329. #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
  1330. #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
  1331. #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x))
  1332. #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
  1333. #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
  1334. #define GGML_F32Cx4_ADD vaddq_f32
  1335. #define GGML_F32Cx4_MUL vmulq_f32
  1336. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1337. #define GGML_F16_VEC GGML_F32Cx4
  1338. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1339. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1340. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1341. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1342. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1343. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1344. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1345. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1346. #endif
  1347. #elif defined(__AVX__)
  1348. #define GGML_SIMD
  1349. // F32 AVX
  1350. #define GGML_F32_STEP 32
  1351. #define GGML_F32_EPR 8
  1352. #define GGML_F32x8 __m256
  1353. #define GGML_F32x8_ZERO _mm256_setzero_ps()
  1354. #define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
  1355. #define GGML_F32x8_LOAD _mm256_loadu_ps
  1356. #define GGML_F32x8_STORE _mm256_storeu_ps
  1357. #if defined(__FMA__)
  1358. #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
  1359. #else
  1360. #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
  1361. #endif
  1362. #define GGML_F32x8_ADD _mm256_add_ps
  1363. #define GGML_F32x8_MUL _mm256_mul_ps
  1364. #define GGML_F32x8_REDUCE(res, x) \
  1365. { \
  1366. for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
  1367. x[2*i] = _mm256_add_ps(x[2*i], x[2*i+1]); \
  1368. } \
  1369. for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
  1370. x[4*i] = _mm256_add_ps(x[4*i], x[4*i+2]); \
  1371. } \
  1372. for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
  1373. x[8*i] = _mm256_add_ps(x[8*i], x[8*i+4]); \
  1374. } \
  1375. const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
  1376. _mm256_extractf128_ps(x[0], 1)); \
  1377. const __m128 t1 = _mm_hadd_ps(t0, t0); \
  1378. res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
  1379. }
  1380. // TODO: is this optimal ?
  1381. #define GGML_F32_VEC GGML_F32x8
  1382. #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO
  1383. #define GGML_F32_VEC_SET1 GGML_F32x8_SET1
  1384. #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD
  1385. #define GGML_F32_VEC_STORE GGML_F32x8_STORE
  1386. #define GGML_F32_VEC_FMA GGML_F32x8_FMA
  1387. #define GGML_F32_VEC_ADD GGML_F32x8_ADD
  1388. #define GGML_F32_VEC_MUL GGML_F32x8_MUL
  1389. #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE
  1390. // F16 AVX
  1391. #define GGML_F16_STEP 32
  1392. #define GGML_F16_EPR 8
  1393. // F16 arithmetic is not supported by AVX, so we use F32 instead
  1394. #define GGML_F32Cx8 __m256
  1395. #define GGML_F32Cx8_ZERO _mm256_setzero_ps()
  1396. #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
  1397. #if defined(__F16C__)
  1398. // the _mm256_cvt intrinsics require F16C
  1399. #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
  1400. #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
  1401. #else
  1402. static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
  1403. float tmp[8];
  1404. for (int i = 0; i < 8; i++)
  1405. tmp[i] = GGML_FP16_TO_FP32(x[i]);
  1406. return _mm256_loadu_ps(tmp);
  1407. }
  1408. static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
  1409. float arr[8];
  1410. _mm256_storeu_ps(arr, y);
  1411. for (int i = 0; i < 8; i++)
  1412. x[i] = GGML_FP32_TO_FP16(arr[i]);
  1413. }
  1414. #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
  1415. #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
  1416. #endif
  1417. #define GGML_F32Cx8_FMA GGML_F32x8_FMA
  1418. #define GGML_F32Cx8_ADD _mm256_add_ps
  1419. #define GGML_F32Cx8_MUL _mm256_mul_ps
  1420. #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
  1421. #define GGML_F16_VEC GGML_F32Cx8
  1422. #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO
  1423. #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1
  1424. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p)
  1425. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
  1426. #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA
  1427. #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD
  1428. #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL
  1429. #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
  1430. #elif defined(__POWER9_VECTOR__)
  1431. #define GGML_SIMD
  1432. // F32 POWER9
  1433. #define GGML_F32_STEP 32
  1434. #define GGML_F32_EPR 4
  1435. #define GGML_F32x4 vector float
  1436. #define GGML_F32x4_ZERO 0.0f
  1437. #define GGML_F32x4_SET1 vec_splats
  1438. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  1439. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  1440. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  1441. #define GGML_F32x4_ADD vec_add
  1442. #define GGML_F32x4_MUL vec_mul
  1443. #define GGML_F32x4_REDUCE(res, x) \
  1444. { \
  1445. for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
  1446. x[2*i] = vec_add(x[2*i], x[2*i+1]); \
  1447. } \
  1448. for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
  1449. x[4*i] = vec_add(x[4*i], x[4*i+2]); \
  1450. } \
  1451. for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
  1452. x[8*i] = vec_add(x[8*i], x[8*i+4]); \
  1453. } \
  1454. res = vec_extract(x[0], 0) + \
  1455. vec_extract(x[0], 1) + \
  1456. vec_extract(x[0], 2) + \
  1457. vec_extract(x[0], 3); \
  1458. }
  1459. #define GGML_F32_VEC GGML_F32x4
  1460. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1461. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1462. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1463. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1464. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1465. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1466. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1467. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1468. // F16 POWER9
  1469. #define GGML_F16_STEP GGML_F32_STEP
  1470. #define GGML_F16_EPR GGML_F32_EPR
  1471. #define GGML_F16_VEC GGML_F32x4
  1472. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  1473. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  1474. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  1475. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  1476. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  1477. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  1478. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  1479. vec_extract_fp32_from_shortl(vec_xl(0, p))
  1480. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  1481. #define GGML_F16_VEC_STORE(p, r, i) \
  1482. if (i & 0x1) \
  1483. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  1484. r[i - GGML_ENDIAN_BYTE(0)]), \
  1485. 0, p - GGML_F16_EPR)
  1486. #elif defined(__wasm_simd128__)
  1487. #define GGML_SIMD
  1488. // F32 WASM
  1489. #define GGML_F32_STEP 16
  1490. #define GGML_F32_EPR 4
  1491. #define GGML_F32x4 v128_t
  1492. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  1493. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  1494. #define GGML_F32x4_LOAD wasm_v128_load
  1495. #define GGML_F32x4_STORE wasm_v128_store
  1496. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  1497. #define GGML_F32x4_ADD wasm_f32x4_add
  1498. #define GGML_F32x4_MUL wasm_f32x4_mul
  1499. #define GGML_F32x4_REDUCE(res, x) \
  1500. { \
  1501. for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
  1502. x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \
  1503. } \
  1504. for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
  1505. x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \
  1506. } \
  1507. for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
  1508. x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \
  1509. } \
  1510. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1511. wasm_f32x4_extract_lane(x[0], 1) + \
  1512. wasm_f32x4_extract_lane(x[0], 2) + \
  1513. wasm_f32x4_extract_lane(x[0], 3); \
  1514. }
  1515. #define GGML_F32_VEC GGML_F32x4
  1516. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1517. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1518. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1519. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1520. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1521. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1522. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1523. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1524. // F16 WASM
  1525. #define GGML_F16_STEP 16
  1526. #define GGML_F16_EPR 4
  1527. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  1528. float tmp[4];
  1529. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  1530. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  1531. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  1532. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  1533. return wasm_v128_load(tmp);
  1534. }
  1535. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  1536. float tmp[4];
  1537. wasm_v128_store(tmp, x);
  1538. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  1539. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  1540. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  1541. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  1542. }
  1543. #define GGML_F16x4 v128_t
  1544. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  1545. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  1546. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  1547. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  1548. #define GGML_F16x4_FMA GGML_F32x4_FMA
  1549. #define GGML_F16x4_ADD wasm_f32x4_add
  1550. #define GGML_F16x4_MUL wasm_f32x4_mul
  1551. #define GGML_F16x4_REDUCE(res, x) \
  1552. { \
  1553. for (int i = 0; i < GGML_F16_ARR/2; ++i) { \
  1554. x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \
  1555. } \
  1556. for (int i = 0; i < GGML_F16_ARR/4; ++i) { \
  1557. x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \
  1558. } \
  1559. for (int i = 0; i < GGML_F16_ARR/8; ++i) { \
  1560. x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \
  1561. } \
  1562. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1563. wasm_f32x4_extract_lane(x[0], 1) + \
  1564. wasm_f32x4_extract_lane(x[0], 2) + \
  1565. wasm_f32x4_extract_lane(x[0], 3); \
  1566. }
  1567. #define GGML_F16_VEC GGML_F16x4
  1568. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  1569. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  1570. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  1571. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  1572. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  1573. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  1574. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  1575. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  1576. #elif defined(__SSE3__)
  1577. #define GGML_SIMD
  1578. // F32 SSE
  1579. #define GGML_F32_STEP 32
  1580. #define GGML_F32_EPR 4
  1581. #define GGML_F32x4 __m128
  1582. #define GGML_F32x4_ZERO _mm_setzero_ps()
  1583. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  1584. #define GGML_F32x4_LOAD _mm_loadu_ps
  1585. #define GGML_F32x4_STORE _mm_storeu_ps
  1586. #if defined(__FMA__)
  1587. // TODO: Does this work?
  1588. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  1589. #else
  1590. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  1591. #endif
  1592. #define GGML_F32x4_ADD _mm_add_ps
  1593. #define GGML_F32x4_MUL _mm_mul_ps
  1594. #define GGML_F32x4_REDUCE(res, x) \
  1595. { \
  1596. for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
  1597. x[2*i] = _mm_add_ps(x[2*i], x[2*i+1]); \
  1598. } \
  1599. for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
  1600. x[4*i] = _mm_add_ps(x[4*i], x[4*i+2]); \
  1601. } \
  1602. for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
  1603. x[8*i] = _mm_add_ps(x[8*i], x[8*i+4]); \
  1604. } \
  1605. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  1606. res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  1607. }
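// Sketch of the tail of the reduce above: _mm_hadd_ps(x0, x0) produces
// [a0+a1, a2+a3, a0+a1, a2+a3]; a second hadd puts a0+a1+a2+a3 in every lane,
// and _mm_cvtss_f32 extracts lane 0 as the scalar result.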
1608. // TODO: is this optimal?
  1609. #define GGML_F32_VEC GGML_F32x4
  1610. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1611. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1612. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1613. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1614. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1615. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1616. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1617. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1618. // F16 SSE
  1619. #define GGML_F16_STEP 32
  1620. #define GGML_F16_EPR 4
  1621. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  1622. float tmp[4];
  1623. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  1624. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  1625. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  1626. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  1627. return _mm_loadu_ps(tmp);
  1628. }
  1629. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  1630. float arr[4];
  1631. _mm_storeu_ps(arr, y);
  1632. x[0] = GGML_FP32_TO_FP16(arr[0]);
  1633. x[1] = GGML_FP32_TO_FP16(arr[1]);
  1634. x[2] = GGML_FP32_TO_FP16(arr[2]);
  1635. x[3] = GGML_FP32_TO_FP16(arr[3]);
  1636. }
  1637. #define GGML_F32Cx4 __m128
  1638. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  1639. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  1640. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  1641. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  1642. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  1643. #define GGML_F32Cx4_ADD _mm_add_ps
  1644. #define GGML_F32Cx4_MUL _mm_mul_ps
  1645. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1646. #define GGML_F16_VEC GGML_F32Cx4
  1647. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1648. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1649. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1650. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1651. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1652. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1653. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1654. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1655. #endif
  1656. // GGML_F32_ARR / GGML_F16_ARR
  1657. // number of registers to use per step
  1658. #ifdef GGML_SIMD
  1659. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  1660. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  1661. #endif
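// For example, with the AVX F16 settings above (STEP 32, EPR 8) this gives 4 vector
// accumulators per step, and with the SSE F32 settings (STEP 32, EPR 4) it gives 8.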
  1662. //
  1663. // fundamental operations
  1664. //
  1665. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1666. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1667. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1668. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1669. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  1670. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  1671. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  1672. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  1673. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1674. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  1675. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  1676. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  1677. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
  1678. inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
  1679. #ifdef GGML_SIMD
  1680. float sumf = 0.0f;
  1681. const int np = (n & ~(GGML_F32_STEP - 1));
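// np is n rounded down to a multiple of GGML_F32_STEP (a power of two),
// e.g. n = 100 with GGML_F32_STEP == 32 gives np = 96; the remaining
// 4 elements are handled by the scalar leftover loop below.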
  1682. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  1683. GGML_F32_VEC ax[GGML_F32_ARR];
  1684. GGML_F32_VEC ay[GGML_F32_ARR];
  1685. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1686. for (int j = 0; j < GGML_F32_ARR; j++) {
  1687. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1688. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1689. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  1690. }
  1691. }
  1692. // reduce sum0..sum3 to sum0
  1693. GGML_F32_VEC_REDUCE(sumf, sum);
  1694. // leftovers
  1695. for (int i = np; i < n; ++i) {
  1696. sumf += x[i]*y[i];
  1697. }
  1698. #else
  1699. // scalar
  1700. ggml_float sumf = 0.0;
  1701. for (int i = 0; i < n; ++i) {
  1702. sumf += (ggml_float)(x[i]*y[i]);
  1703. }
  1704. #endif
  1705. *s = sumf;
  1706. }
  1707. #if __AVX512F__ && QK4_0 == 32
  1708. static inline __m512i bytes_from_q4_0_twoblocks_avx512( const __m512i blocks ) {
  1709. // The 64 bytes of `blocks` contain two consecutive Q4_0 blocks loaded from memory:
  1710. // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  1711. // |63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32|
  1712. // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  1713. // | :. =_ () [] <> () Zz Yy|
  1714. // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  1715. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  1716. // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  1717. // |Xx Ww Vv Uu Tt Ss Rr Qq Pp Oo Nn Mm Ll Kk Jj Ii Hh Gg Ff Ee Dd Cc Bb Aa |
  1718. // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  1719. //
  1720. // Bytes 04..19 (block #0) and 24..39 (block #1) both contain 32 nibbles (4-bit unsigned integers).
  1721. // We have exactly 64 nibbles, so we want to place each nibble into a separate byte.
  1722. // Bytes 00..03 and 20..23 contain scales, which are irrelevant to this function.
  1723. // Bytes 40..63 are masked when loading the data, so they are zeroed out.
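// (Each block_q4_0 is 20 bytes here: a 4-byte float scale `d` followed by
// QK4_0/2 = 16 bytes of packed nibbles, so two consecutive blocks span the
// 40 bytes described above.)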
  1724. #ifdef __AVX512VBMI__
  1725. const __m512i byte_perm = _mm512_set_epi8(
  1726. 39, 38, 39, 38, 37, 36, 37, 36, 35, 34, 35, 34, 33, 32, 33, 32,
  1727. 31, 30, 31, 30, 29, 28, 29, 28, 27, 26, 27, 26, 25, 24, 25, 24,
  1728. 19, 18, 19, 18, 17, 16, 17, 16, 15, 14, 15, 14, 13, 12, 13, 12,
  1729. 11, 10, 11, 10, 9, 8, 9, 8, 7, 6, 7, 6, 5, 4, 5, 4
  1730. );
  1731. const __m512i permuted = _mm512_permutexvar_epi8( byte_perm, blocks );
  1732. // After applying VPERMB, `permuted` looks like this:
  1733. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1734. // |63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32|
  1735. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1736. // |:. =_ :. =_ () [] () [] <> () <> () Zz Yy Zz Yy Xx Ww Xx Ww Vv Uu Vv Uu Tt Ss Tt Ss Rr Qq Rr Qq|
  1737. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1738. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  1739. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1740. // |Pp Oo Pp Oo Nn Mm Nn Mm Ll Kk Ll Kk Jj Ii Jj Ii Hh Gg Hh Gg Ff Ee Ff Ee Dd Cc Dd Cc Bb Aa Bb Aa|
  1741. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1742. #else
  1743. const __m512i word_perm = _mm512_set_epi16(
  1744. 19, 19, 18, 18, 17, 17, 16, 16, 15, 15, 14, 14, 13, 13, 12, 12,
  1745. 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2
  1746. );
  1747. const __m512i permuted = _mm512_permutexvar_epi16( word_perm, blocks );
  1748. // This is the fallback path for CPUs that don't support VPERMB. Since we permute 16-bit groups only,
  1749. // VPERMB can be replaced with VPERMW. We could always use VPERMW, but at least on Tiger Lake and
  1750. // Ice Lake VPERMW followed by a right shift is quite noticeably slower than VPERMB.
  1751. #endif
  1752. // Shift every odd-numbered 16-bit group to the right by 4 bits.
  1753. const __mmask32 shift_mask = 0xaaaaaaaa;
  1754. const __m512i shifted = _mm512_mask_srai_epi16( permuted, shift_mask, permuted, 4 );
  1755. // After applying VPSRAW, `shifted` looks like this (the "empty" nibbles are filled with zeroes):
  1756. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1757. // |63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  1758. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1759. // | : .= :. =_ ( )[ () [] < >( <> () Z zY Zz Yy X xW Xx Ww V vU Vv Uu T tS Tt Ss R rQ Rr Qq
  1760. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1761. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  1762. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1763. // | P pO Pp Oo N nM Nn Mm L lK Ll Kk J jI Jj Ii H hG Hh Gg F fE Ff Ee D dC Dd Cc B bA Bb Aa|
  1764. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1765. // Now we just need to zero out the higher nibble in each byte, and we're done.
  1766. const __m512i low_nibble_mask = _mm512_set1_epi8( 0xf );
  1767. return _mm512_and_si512( low_nibble_mask, shifted );
  1768. // The final result looks like this:
  1769. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1770. // |63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32|
  1771. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1772. // | : = . _ ( [ ) ] < ( > ) Z Y z y X W x w V U v u T S t s R Q r q|
  1773. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1774. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  1775. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1776. // | P O p o N M n m L K l k J I j i H G h g F E f e D C d c B A b a|
  1777. // +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+
  1778. }
  1779. static inline __m512 dot_q4_0_twoblocks_avx512(
  1780. __m512 acc,
  1781. const block_q4_0 * restrict x,
  1782. const block_q4_0 * restrict y,
  1783. int i
  1784. ) {
  1785. // A pair of Q4_0 blocks spans 40 bytes, while an AVX-512 register has 64. The remaining 24 bytes
  1786. // can potentially be unaddressable, so we make sure to mask them out before the load, even though
  1787. // we don't use them at all. This might hurt the performance slightly, since the compiler is forced
  1788. // to use e.g. `VMOVDQU64 REG, MASK, [ADDR] + VPERMB ..., REG` instead of just `VPERMB ..., [ADDR]`.
  1789. const __mmask8 load_mask = 0x1f;
  1790. const __m512i blocks_0 = _mm512_maskz_loadu_epi64( load_mask, &x[i] );
  1791. const __m512i blocks_1 = _mm512_maskz_loadu_epi64( load_mask, &y[i] );
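// The mask covers eight 64-bit lanes; 0x1f enables the low five of them,
// i.e. exactly the 40 bytes occupied by the two blocks, and zeroes the rest.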
  1792. // We want to multiply the scales, so we interpret both registers as 16 32-bit floats:
  1793. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1794. // | 15 | 14 | 13 | 12 | 11 | 10 | 09 | 08 | 07 | 06 | 05 | 04 | 03 | 02 | 01 | 00 |
  1795. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1796. // blocks_0_float
  1797. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1798. // | | | | | | | xx | xx | xx | xx | B | xx | xx | xx | xx | A |
  1799. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1800. // blocks_1_float
  1801. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1802. // | | | | | | | xx | xx | xx | xx | D | xx | xx | xx | xx | C |
  1803. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1804. const __m512 blocks_0_float = _mm512_castsi512_ps( blocks_0 );
  1805. const __m512 blocks_1_float = _mm512_castsi512_ps( blocks_1 );
  1806. // We absolutely shouldn't touch the floats marked with `xx`: they contain some
  1807. // random data, which might very well underflow. At least on Intel, this leads
  1808. // to a huge penalty that can't be ignored (easily 100x or more) unless you
  1809. // compile your code with something like `-ffast-math` to enable FTZ/DAZ flags.
  1810. // (and ggml can't assume that you do)...
  1811. const __mmask16 scale_mul_mask = 0x21;
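// 0x21 = 0b100001 selects float lanes 0 and 5, which is where the two scales
// live: byte offset 0 -> lane 0 (A*C) and byte offset 20 -> lane 5 (B*D).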
  1812. #ifdef __clang__
  1813. // ...however, clang decides to optimize the multiplication mask away:
  1814. // https://godbolt.org/z/P8PqdsfvW
  1815. // gcc and MSVC do the sane thing. This horrible workaround forces clang to emit the mask.
  1816. __m512i scales;
  1817. __asm__(
  1818. "vmulps %1, %2, %0%{%3%}"
  1819. : "=v" ( scales )
  1820. : "vm" ( blocks_0_float ), "v" ( blocks_1_float ), "Yk" ( scale_mul_mask )
  1821. );
  1822. #else
  1823. const __m512 scales = _mm512_maskz_mul_ps( scale_mul_mask, blocks_0_float, blocks_1_float );
  1824. #endif
  1825. const __m512i scale_perm = _mm512_set_epi32(
  1826. 5, 5, 5, 5, 5, 5, 5, 5,
  1827. 0, 0, 0, 0, 0, 0, 0, 0
  1828. );
  1829. const __m512 permuted_scales = _mm512_permutexvar_ps( scale_perm, scales );
  1830. // After VMULPS and VPERMPS, `permuted_scales` looks like this:
  1831. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1832. // | 15 | 14 | 13 | 12 | 11 | 10 | 09 | 08 | 07 | 06 | 05 | 04 | 03 | 02 | 01 | 00 |
  1833. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1834. // | B*D| B*D| B*D| B*D| B*D| B*D| B*D| B*D| A*C| A*C| A*C| A*C| A*C| A*C| A*C| A*C|
  1835. // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
  1836. const __m512i bytes_0 = bytes_from_q4_0_twoblocks_avx512( blocks_0 );
  1837. const __m512i bytes_1 = bytes_from_q4_0_twoblocks_avx512( blocks_1 );
  1838. // Now we want to compute dot products of 4-element byte vectors and store them in
  1839. // 32-bit integers. That is (only one 4-element vector is shown for clarity):
  1840. // +----+----+----+----+
  1841. // ... | 03 | 02 | 01 | 00 |
  1842. // +----+----+----+----+
  1843. // bytes_0
  1844. // +----+----+----+----+
  1845. // ... | D | C | B | A |
  1846. // +----+----+----+----+
  1847. // bytes_1
  1848. // +----+----+----+----+
  1849. // ... | H | G | F | E |
  1850. // +----+----+----+----+
  1851. // final_res_int
  1852. // +----+----+----+----+
  1853. // ... | A*E+B*F+C*G+D*H |
  1854. // +----+----+----+----+
  1855. const __m512i plus_8 = _mm512_set1_epi8( 8 );
  1856. const __m512i bytes_1_minus_8 = _mm512_sub_epi8( bytes_1, plus_8 );
  1857. #ifdef __AVX512VNNI__
  1858. // We have VPDPBUSDS in AVX512-VNNI, which does exactly what we want, but with a catch:
  1859. // the *left* operand is supposed to be unsigned, while Q4_0 quantization subtracts 8
  1860. // from each nibble, so they can be negative. So, instead of `(bytes_0 - 8) * (bytes_1 - 8)`,
  1861. // we compute `bytes_0 * (bytes_1 - 8) + bytes_1 * (-8) + 64`. VPDPBUSDS uses an accumulator,
  1862. // which means we only need 2 instructions.
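// In other words, per byte pair: (a - 8)*(b - 8) = a*(b - 8) - 8*b + 64,
// and since 4 byte products feed each 32-bit lane, the constant term is 4*64,
// which is exactly the dot_init value below.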
  1863. const __m512i dot_init = _mm512_set1_epi32( 4 * 64 );
  1864. const __m512i minus_8 = _mm512_set1_epi8( -8 );
  1865. const __m512i prod_0 = _mm512_dpbusds_epi32( dot_init, bytes_1, minus_8 );
  1866. const __m512i final_res_int = _mm512_dpbusds_epi32( prod_0, bytes_0, bytes_1_minus_8 );
  1867. #else
  1868. // As a fallback, we have VPMADDUBSW in AVX512-BW, which uses 16-bit products instead of 32-bit ones.
  1869. // It has the same catch as VPDPBUSDS: the left operand should be unsigned.
  1870. // This is essentially the AVX-512 version of the AVX-2 trick used by GH user Const-me
  1871. // ref: https://gist.github.com/Const-me/4d30e1fc767ab314596e16e90f53b6f4#file-matmultest-cpp-L119
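// Concretely: prod_0[k] = sum of bytes_0 * (bytes_1 - 8) over a byte pair,
// prod_1[k] = sum of 8 * (bytes_1 - 8) over the same pair, so diff[k] is the
// desired sum of (bytes_0 - 8)*(bytes_1 - 8); the final VPMADDWD with `one`
// just widens and sums adjacent 16-bit pairs into 32-bit lanes.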
  1872. const __m512i one = _mm512_set1_epi16( 1 );
  1873. const __m512i prod_0 = _mm512_maddubs_epi16( bytes_0, bytes_1_minus_8 );
  1874. const __m512i prod_1 = _mm512_maddubs_epi16( plus_8, bytes_1_minus_8 );
  1875. const __m512i diff = _mm512_sub_epi16( prod_0, prod_1 );
  1876. const __m512i final_res_int = _mm512_madd_epi16( diff, one );
  1877. #endif
  1878. // Finally, we multiply the permuted scales and the 32-bit dot products, then accumulate.
  1879. const __m512 final_res_float = _mm512_cvtepi32_ps( final_res_int );
  1880. return _mm512_fmadd_ps( permuted_scales, final_res_float, acc );
  1881. }
  1882. #endif
  1883. inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
  1884. ggml_float sumf = 0.0;
  1885. #if defined(GGML_SIMD)
  1886. const int np = (n & ~(GGML_F16_STEP - 1));
  1887. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  1888. GGML_F16_VEC ax[GGML_F16_ARR];
  1889. GGML_F16_VEC ay[GGML_F16_ARR];
  1890. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1891. for (int j = 0; j < GGML_F16_ARR; j++) {
  1892. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1893. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1894. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  1895. }
  1896. }
  1897. // reduce sum0..sum3 to sum0
  1898. GGML_F16_VEC_REDUCE(sumf, sum);
  1899. // leftovers
  1900. for (int i = np; i < n; ++i) {
  1901. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1902. }
  1903. #else
  1904. for (int i = 0; i < n; ++i) {
  1905. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1906. }
  1907. #endif
  1908. *s = sumf;
  1909. }
  1910. static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  1911. const int nb = n / QK4_0;
  1912. assert(n % QK4_0 == 0);
  1913. assert(nb % 2 == 0);
  1914. const block_q4_0 * restrict x = vx;
  1915. const block_q4_0 * restrict y = vy;
  1916. float sumf = 0.0;
  1917. #if defined(__ARM_NEON)
  1918. float sum0 = 0.0f;
  1919. float sum1 = 0.0f;
  1920. for (int i = 0; i < nb; i += 2) {
  1921. const block_q4_0 * restrict x0 = &x[i + 0];
  1922. const block_q4_0 * restrict y0 = &y[i + 0];
  1923. const block_q4_0 * restrict x1 = &x[i + 1];
  1924. const block_q4_0 * restrict y1 = &y[i + 1];
  1925. const uint8x16_t m4b = vdupq_n_u8(0xf);
  1926. const int8x16_t s8b = vdupq_n_s8(0x8);
  1927. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  1928. const uint8x16_t v1_0 = vld1q_u8(y0->qs);
  1929. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  1930. const uint8x16_t v1_1 = vld1q_u8(y1->qs);
  1931. // 4-bit -> 8-bit
  1932. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b));
  1933. const int8x16_t v1_0l = vreinterpretq_s8_u8(vandq_u8(v1_0, m4b));
  1934. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  1935. const int8x16_t v1_0h = vreinterpretq_s8_u8(vshrq_n_u8(v1_0, 4));
  1936. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b));
  1937. const int8x16_t v1_1l = vreinterpretq_s8_u8(vandq_u8(v1_1, m4b));
  1938. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  1939. const int8x16_t v1_1h = vreinterpretq_s8_u8(vshrq_n_u8(v1_1, 4));
  1940. // sub 8
  1941. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  1942. const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b);
  1943. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  1944. const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b);
  1945. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  1946. const int8x16_t v1_1ls = vsubq_s8(v1_1l, s8b);
  1947. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  1948. const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b);
  1949. #if defined(__ARM_FEATURE_DOTPROD)
  1950. // dot product into int32x4_t
  1951. int32x4_t p_0 = vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls);
  1952. int32x4_t p_1 = vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls);
  1953. p_0 = vdotq_s32(p_0, v0_0hs, v1_0hs);
  1954. p_1 = vdotq_s32(p_1, v0_1hs, v1_1hs);
  1955. sum0 += x0->d*y0->d*vaddvq_s32(p_0);
  1956. sum1 += x1->d*y1->d*vaddvq_s32(p_1);
  1957. #else
  1958. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
  1959. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
  1960. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));
  1961. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs));
  1962. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls));
  1963. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls));
  1964. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs));
  1965. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs));
  1966. const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h);
  1967. const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h);
  1968. const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h);
  1969. const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h);
  1970. const int16x8_t p_0 = vaddq_s16(pl_0, ph_0);
  1971. const int16x8_t p_1 = vaddq_s16(pl_1, ph_1);
  1972. sum0 += x0->d*y0->d*vaddvq_s16(p_0);
  1973. sum1 += x1->d*y1->d*vaddvq_s16(p_1);
  1974. #endif
  1975. }
  1976. sumf = sum0 + sum1;
  1977. #elif defined(__AVX512F__)
  1978. // Initialize accumulator with zeros
  1979. __m512 acc0 = _mm512_setzero_ps();
  1980. __m512 acc1 = _mm512_setzero_ps();
  1981. const int superblock_size = 16;
  1982. const int superblock_count = nb / superblock_size;
  1983. for (int superblock_ix = 0; superblock_ix < superblock_count; superblock_ix += 1) {
  1984. int i = superblock_ix * superblock_size;
  1985. acc0 = dot_q4_0_twoblocks_avx512( acc0, x, y, i+0 );
  1986. acc1 = dot_q4_0_twoblocks_avx512( acc1, x, y, i+2 );
  1987. acc0 = dot_q4_0_twoblocks_avx512( acc0, x, y, i+4 );
  1988. acc1 = dot_q4_0_twoblocks_avx512( acc1, x, y, i+6 );
  1989. acc0 = dot_q4_0_twoblocks_avx512( acc0, x, y, i+8 );
  1990. acc1 = dot_q4_0_twoblocks_avx512( acc1, x, y, i+10 );
  1991. acc0 = dot_q4_0_twoblocks_avx512( acc0, x, y, i+12 );
  1992. acc1 = dot_q4_0_twoblocks_avx512( acc1, x, y, i+14 );
  1993. }
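// The calls above alternate between acc0 and acc1, presumably to keep two
// independent FMA dependency chains in flight within each superblock.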
  1994. // Remainders
  1995. for (int i = superblock_count * superblock_size; i < nb; i += 2) {
  1996. acc0 = dot_q4_0_twoblocks_avx512( acc0, x, y, i );
  1997. }
  1998. // Horizontal sum of all lanes of the accumulator
  1999. sumf = _mm512_reduce_add_ps( acc0 ) + _mm512_reduce_add_ps( acc1 );
  2000. #elif defined(__AVX2__)
  2001. // Initialize accumulator with zeros
  2002. __m256 acc = _mm256_setzero_ps();
  2003. /* Prepare the constants we will need during execution */
  2004. const __m256i lowMask = _mm256_set1_epi8( 0xF );
  2005. const __m256i offset_8 = _mm256_set1_epi16( 8 );
  2006. #define UNROLL_COUNT 8
2007. // the number of blocks must be a multiple of the unroll count
  2008. assert(nb % UNROLL_COUNT == 0);
  2009. // Main loop
  2010. for (int i = 0; i < nb; i+=UNROLL_COUNT) {
  2011. // This loop will be unrolled by the compiler
  2012. for (int u=0;u<UNROLL_COUNT;u++) {
  2013. /* Compute combined scale for the block */
  2014. const __m256 scale = _mm256_mul_ps(
  2015. _mm256_broadcast_ss( &x[i+u].d ),
  2016. _mm256_broadcast_ss( &y[i+u].d ) );
  2017. /* get input from x
  2018. Input: 32 Nibbles (16 bytes) at *x[i+u]
  2019. Output: 2 vectors with 16 values of type int16_t (x_high_q, x_low_q) */
  2020. /* Load 16 bytes from memory */
  2021. const __m128i tmp_x = _mm_loadu_si128( ( const __m128i* ) x[i+u].qs);
  2022. /* Expand bytes into uint16_t values */
  2023. const __m256i bytes_x = _mm256_cvtepu8_epi16(tmp_x);
  2024. /* Unpack values into individual bytes */
  2025. __m256i x_low_q = _mm256_and_si256( lowMask, bytes_x );
  2026. const __m256i pre_shift_x_high_q = _mm256_andnot_si256( lowMask, bytes_x );
  2027. __m256i x_high_q = _mm256_srli_epi16( pre_shift_x_high_q, 4 );
  2028. /* Now we have two vectors with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. */
  2029. x_high_q = _mm256_sub_epi16( x_high_q, offset_8 );
  2030. x_low_q = _mm256_sub_epi16( x_low_q, offset_8 );
  2031. /* get input from y
  2032. Input: 32 Nibbles (16 bytes) at *y[i+u]
  2033. Output: 2 vectors with 16 values of type int16_t (y_high_q, y_low_q) */
  2034. /* Load 16 bytes from memory */
  2035. const __m128i tmp_y = _mm_loadu_si128( (const __m128i* ) y[i+u].qs);
  2036. /* Expand bytes into uint16_t values */
  2037. const __m256i bytes_y = _mm256_cvtepu8_epi16(tmp_y);
  2038. /* Unpack values into individual bytes */
  2039. const __m256i pre_shift_y_high_q = _mm256_andnot_si256( lowMask, bytes_y );
  2040. __m256i y_high_q = _mm256_srli_epi16( pre_shift_y_high_q, 4 );
  2041. __m256i y_low_q = _mm256_and_si256( lowMask, bytes_y );
  2042. /* Now we have two vectors with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. */
  2043. y_high_q = _mm256_sub_epi16( y_high_q, offset_8 );
  2044. y_low_q = _mm256_sub_epi16( y_low_q, offset_8 );
  2045. /* Compute products of int16_t integers, add pairwise, store as int32_t */
  2046. __m256i xy_high_q = _mm256_madd_epi16( x_high_q, y_high_q );
  2047. __m256i xy_low_q = _mm256_madd_epi16( x_low_q, y_low_q );
  2048. /* Accumulate the products of int32_t integers -> we now have a vector of 8 int_32t */
  2049. __m256i xy_q = _mm256_add_epi32( xy_high_q, xy_low_q );
2050. /* Convert the vector of 8 int32_t to 8 floats */
  2051. __m256 q = _mm256_cvtepi32_ps( xy_q );
  2052. /* Multiply q with scale and accumulate */
  2053. acc = _mm256_fmadd_ps( scale, q, acc );
  2054. }
  2055. }
  2056. // Return horizontal sum of the acc vector
  2057. __m128 res = _mm256_extractf128_ps( acc, 1 );
  2058. res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
  2059. res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
  2060. res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
  2061. sumf = _mm_cvtss_f32( res );
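// The reduction above goes 8 -> 4 -> 2 -> 1 lanes: extract the upper 128-bit
// half and add it to the lower, add the upper two floats (movehl), add lane 1
// into lane 0 (movehdup + add_ss), then extract lane 0. The other horizontal
// sums below follow the same pattern.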
  2062. #elif defined(__AVX__)
  2063. // Initialize accumulator with zeros
  2064. __m256 acc = _mm256_setzero_ps();
  2065. // Main loop
  2066. for (int i = 0; i < nb; ++i) {
  2067. // Compute combined scale for the block
  2068. const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) );
  2069. __m128i i32[2];
  2070. for (int j = 0; j < 2; ++j) {
  2071. // Load 8 bytes, and unpack 4 bit fields into bytes, making 16 bytes
  2072. __m128i bx = bytesFromNibbles( x[i].qs + 8*j );
  2073. __m128i by = bytesFromNibbles( y[i].qs + 8*j );
  2074. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  2075. const __m128i off = _mm_set1_epi8( 8 );
  2076. bx = _mm_sub_epi8( bx, off );
  2077. by = _mm_sub_epi8( by, off );
  2078. // Get absolute values of x vectors
  2079. const __m128i ax = _mm_sign_epi8(bx, bx);
  2080. // Sign the values of the y vectors
  2081. const __m128i sy = _mm_sign_epi8(by, bx);
  2082. // Perform multiplication and create 16-bit values
  2083. const __m128i dot = _mm_maddubs_epi16(ax, sy);
  2084. const __m128i ones = _mm_set1_epi16(1);
  2085. i32[j] = _mm_madd_epi16(ones, dot);
  2086. }
  2087. // Convert int32_t to float
  2088. __m256 p = _mm256_cvtepi32_ps( _mm256_set_m128i( i32[0], i32[1] ));
  2089. // Apply the scale, and accumulate
  2090. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  2091. }
  2092. // Return horizontal sum of the acc vector
  2093. __m128 res = _mm256_extractf128_ps( acc, 1 );
  2094. res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
  2095. res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
  2096. res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
  2097. sumf = _mm_cvtss_f32( res );
  2098. #elif defined(__wasm_simd128__)
  2099. // wasm simd
  2100. float sum0 = 0.0f;
  2101. float sum1 = 0.0f;
  2102. for (int i = 0; i < nb; i += 2) {
  2103. const block_q4_0 * restrict x0 = &x[i + 0];
  2104. const block_q4_0 * restrict y0 = &y[i + 0];
  2105. const block_q4_0 * restrict x1 = &x[i + 1];
  2106. const block_q4_0 * restrict y1 = &y[i + 1];
  2107. const v128_t m4b = wasm_u8x16_splat(0xf);
  2108. const v128_t s8b = wasm_i8x16_splat(0x8);
  2109. const v128_t v0_0 = wasm_v128_load(x0->qs);
  2110. const v128_t v0_1 = wasm_v128_load(y0->qs);
  2111. const v128_t v1_0 = wasm_v128_load(x1->qs);
  2112. const v128_t v1_1 = wasm_v128_load(y1->qs);
  2113. // 4-bit -> 8-bit
  2114. const v128_t v0_0l = wasm_v128_and(v0_0, m4b);
  2115. const v128_t v1_0l = wasm_v128_and(v1_0, m4b);
  2116. const v128_t v0_0h = wasm_u8x16_shr(v0_0, 4);
  2117. const v128_t v1_0h = wasm_u8x16_shr(v1_0, 4);
  2118. const v128_t v0_1l = wasm_v128_and(v0_1, m4b);
  2119. const v128_t v1_1l = wasm_v128_and(v1_1, m4b);
  2120. const v128_t v0_1h = wasm_u8x16_shr(v0_1, 4);
  2121. const v128_t v1_1h = wasm_u8x16_shr(v1_1, 4);
  2122. // sub 8
  2123. const v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b);
  2124. const v128_t v1_0ls = wasm_i8x16_sub(v1_0l, s8b);
  2125. const v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b);
  2126. const v128_t v1_0hs = wasm_i8x16_sub(v1_0h, s8b);
  2127. const v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b);
  2128. const v128_t v1_1ls = wasm_i8x16_sub(v1_1l, s8b);
  2129. const v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b);
  2130. const v128_t v1_1hs = wasm_i8x16_sub(v1_1h, s8b);
  2131. // dot product into int16x8_t
  2132. const v128_t pl0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_0ls), wasm_i16x8_extend_low_i8x16(v1_0ls));
  2133. const v128_t pl0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0ls), wasm_i16x8_extend_high_i8x16(v1_0ls));
  2134. const v128_t ph0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_0hs), wasm_i16x8_extend_low_i8x16(v1_0hs));
  2135. const v128_t ph0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0hs), wasm_i16x8_extend_high_i8x16(v1_0hs));
  2136. const v128_t pl1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_1ls), wasm_i16x8_extend_low_i8x16(v1_1ls));
  2137. const v128_t pl1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1ls), wasm_i16x8_extend_high_i8x16(v1_1ls));
  2138. const v128_t ph1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_1hs), wasm_i16x8_extend_low_i8x16(v1_1hs));
  2139. const v128_t ph1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1hs), wasm_i16x8_extend_high_i8x16(v1_1hs));
  2140. const v128_t pl_0 = wasm_i16x8_add(pl0l, pl0h);
  2141. const v128_t ph_0 = wasm_i16x8_add(ph0l, ph0h);
  2142. const v128_t pl_1 = wasm_i16x8_add(pl1l, pl1h);
  2143. const v128_t ph_1 = wasm_i16x8_add(ph1l, ph1h);
  2144. const v128_t p_0 = wasm_i16x8_add(pl_0, ph_0);
  2145. const v128_t p_1 = wasm_i16x8_add(pl_1, ph_1);
  2146. sum0 += x0->d * y0->d * (
  2147. wasm_i16x8_extract_lane(p_0, 0) + wasm_i16x8_extract_lane(p_0, 1) +
  2148. wasm_i16x8_extract_lane(p_0, 2) + wasm_i16x8_extract_lane(p_0, 3) +
  2149. wasm_i16x8_extract_lane(p_0, 4) + wasm_i16x8_extract_lane(p_0, 5) +
  2150. wasm_i16x8_extract_lane(p_0, 6) + wasm_i16x8_extract_lane(p_0, 7));
  2151. sum1 += x1->d * y1->d * (
  2152. wasm_i16x8_extract_lane(p_1, 0) + wasm_i16x8_extract_lane(p_1, 1) +
  2153. wasm_i16x8_extract_lane(p_1, 2) + wasm_i16x8_extract_lane(p_1, 3) +
  2154. wasm_i16x8_extract_lane(p_1, 4) + wasm_i16x8_extract_lane(p_1, 5) +
  2155. wasm_i16x8_extract_lane(p_1, 6) + wasm_i16x8_extract_lane(p_1, 7));
  2156. }
  2157. sumf = sum0 + sum1;
  2158. #else
  2159. // scalar
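// Reference formulation: each byte packs two 4-bit quants, and a quant q
// represents the value d*(q - 8), so each block pair contributes
// d0*d1 * sum((q0 - 8)*(q1 - 8)) to the dot product.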
  2160. for (int i = 0; i < nb; i++) {
  2161. const float d0 = x[i].d;
  2162. const float d1 = y[i].d;
  2163. const uint8_t * restrict p0 = x[i].qs;
  2164. const uint8_t * restrict p1 = y[i].qs;
  2165. int sumi = 0;
  2166. for (int j = 0; j < QK4_0/2; j++) {
  2167. const uint8_t v0 = p0[j];
  2168. const uint8_t v1 = p1[j];
  2169. const int i0 = (v0 & 0xf) - 8;
  2170. const int i1 = (v0 >> 4) - 8;
  2171. const int i2 = (v1 & 0xf) - 8;
  2172. const int i3 = (v1 >> 4) - 8;
  2173. sumi += i0*i2 + i1*i3;
  2174. }
  2175. sumf += d0 * d1 * sumi;
  2176. }
  2177. #endif
  2178. *s = sumf;
  2179. }
  2180. static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2181. const int nb = n / QK4_1;
  2182. const block_q4_1 * restrict x = vx;
  2183. const block_q4_1 * restrict y = vy;
  2184. float sumf = 0.0;
  2185. #if defined(__AVX2__)
  2186. // Initialize accumulator with zeros
  2187. __m256 acc = _mm256_setzero_ps();
  2188. // Accumulator for constant offsets
  2189. float acc_offset = 0.0f;
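// Q4_1 dequantizes as v = d*q + m, so the block dot product expands to
//   sum((d0*q0 + m0)*(d1*q1 + m1))
//     = d0*d1*sum(q0*q1) + d0*m1*sum(q0) + d1*m0*sum(q1) + QK4_1*m0*m1,
// which is why we need the cross scales and the separate offset accumulator.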
  2190. // Main loop
  2191. for (int i = 0; i < nb; ++i) {
  2192. const float * d0 = &x[i].d;
  2193. const float * d1 = &y[i].d;
  2194. const float * m0 = &x[i].m;
  2195. const float * m1 = &y[i].m;
  2196. const __m256 d0v = _mm256_broadcast_ss( d0 );
  2197. const __m256 d1v = _mm256_broadcast_ss( d1 );
  2198. const __m256 m0v = _mm256_broadcast_ss( m0 );
  2199. const __m256 m1v = _mm256_broadcast_ss( m1 );
  2200. // Compute combined scale for the block
  2201. const __m256 scale_01 = _mm256_mul_ps( d0v, d1v );
  2202. // Compute cross scales for the block
  2203. const __m256 scale_0 = _mm256_mul_ps( d0v, m1v );
  2204. const __m256 scale_1 = _mm256_mul_ps( m0v, d1v );
  2205. const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0xAA /* 0b10101010 */ );
  2206. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  2207. __m256i bx = bytesFromNibbles( x[i].qs );
  2208. __m256i by = bytesFromNibbles( y[i].qs );
  2209. // Now we have a vector with bytes in [ 0 .. 15 ] interval.
  2210. // Sign-extend first 16 signed bytes into int16_t
  2211. __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) );
  2212. __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) );
  2213. // Compute products of int16_t integers, add pairwise
  2214. __m256i i32 = _mm256_madd_epi16( x16, y16 );
  2215. // Sign-extend last 16 signed bytes into int16_t vectors
  2216. __m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) );
  2217. __m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) );
  2218. // Accumulate products of int16_t integers
  2219. i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) );
  2220. // compute sums of unsigned bytes in bx, by in blocks of 8.
  2221. // This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000,
  2222. // which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400.
  2223. // so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ]
  2224. __m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() );
  2225. __m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() );
  2226. __m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) );
  2227. __m256 sums = _mm256_cvtepi32_ps( sumsi );
  2228. // Convert int32_t to float
  2229. __m256 p = _mm256_cvtepi32_ps( i32 );
  2230. // Apply the scale, and accumulate
  2231. // acc += d0*d1*x*y + d0*m1*x + d1*m0*y
  2232. acc = _mm256_fmadd_ps( scale_01, p, acc );
  2233. acc = _mm256_fmadd_ps( cross_scales, sums, acc );
  2234. // acc_offset += m0*m1 (for each entry in the block)
  2235. acc_offset += (*m0)*(*m1);
  2236. }
  2237. // Return horizontal sum of the acc vector
  2238. __m128 res = _mm256_extractf128_ps( acc, 1 );
  2239. res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
  2240. res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
  2241. res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
  2242. sumf = _mm_cvtss_f32( res ) + acc_offset * QK4_1;
  2243. #elif defined(__ARM_NEON)
  2244. float sum00 = 0.0f;
  2245. float sum01 = 0.0f;
  2246. float sum10 = 0.0f;
  2247. float sum11 = 0.0f;
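// The four partial sums map onto the four terms of the Q4_1 expansion
// (see the AVX2 path above): sum00 ~ m0*m1 (scaled by QK4_1 at the end),
// sum01 ~ d0*m1*sum(q0), sum10 ~ d1*m0*sum(q1), sum11 ~ d0*d1*sum(q0*q1).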
  2248. for (int i = 0; i < nb; i += 2) {
  2249. const block_q4_1 * restrict x0 = &x[i + 0];
  2250. const block_q4_1 * restrict y0 = &y[i + 0];
  2251. const block_q4_1 * restrict x1 = &x[i + 1];
  2252. const block_q4_1 * restrict y1 = &y[i + 1];
  2253. const uint8x16_t m4b = vdupq_n_u8(0xf);
  2254. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2255. const uint8x16_t v1_0 = vld1q_u8(y0->qs);
  2256. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2257. const uint8x16_t v1_1 = vld1q_u8(y1->qs);
  2258. // 4-bit -> 8-bit
  2259. const uint8x16_t v0_0l = vandq_u8(v0_0, m4b);
  2260. const uint8x16_t v1_0l = vandq_u8(v1_0, m4b);
  2261. const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4);
  2262. const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4);
  2263. const uint8x16_t v0_1l = vandq_u8(v0_1, m4b);
  2264. const uint8x16_t v1_1l = vandq_u8(v1_1, m4b);
  2265. const uint8x16_t v0_1h = vshrq_n_u8(v0_1, 4);
  2266. const uint8x16_t v1_1h = vshrq_n_u8(v1_1, 4);
  2267. sum00 += x0->m*y0->m;
  2268. sum01 += y0->m*x0->d*((uint16_t)vaddvq_u8(v0_0l) + (uint16_t)vaddvq_u8(v0_0h));
  2269. sum10 += x0->m*y0->d*((uint16_t)vaddvq_u8(v1_0l) + (uint16_t)vaddvq_u8(v1_0h));
  2270. sum00 += x1->m*y1->m;
  2271. sum01 += y1->m*x1->d*((uint16_t)vaddvq_u8(v0_1l) + (uint16_t)vaddvq_u8(v0_1h));
  2272. sum10 += x1->m*y1->d*((uint16_t)vaddvq_u8(v1_1l) + (uint16_t)vaddvq_u8(v1_1h));
  2273. #if defined(__ARM_FEATURE_DOTPROD)
  2274. // dot product into int32x4_t
  2275. uint32x4_t p_0 = vdotq_u32(vdupq_n_u32(0), v0_0l, v1_0l);
  2276. uint32x4_t p_1 = vdotq_u32(vdupq_n_u32(0), v0_1l, v1_1l);
  2277. p_0 = vdotq_u32(p_0, v0_0h, v1_0h);
  2278. p_1 = vdotq_u32(p_1, v0_1h, v1_1h);
  2279. sum11 += x0->d*y0->d*vaddvq_u32(p_0);
  2280. sum11 += x1->d*y1->d*vaddvq_u32(p_1);
  2281. #else
  2282. const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l));
  2283. const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l));
  2284. const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h));
  2285. const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h));
  2286. const uint16x8_t pl1l = vmull_u8(vget_low_u8 (v0_1l), vget_low_u8 (v1_1l));
  2287. const uint16x8_t pl1h = vmull_u8(vget_high_u8(v0_1l), vget_high_u8(v1_1l));
  2288. const uint16x8_t ph1l = vmull_u8(vget_low_u8 (v0_1h), vget_low_u8 (v1_1h));
  2289. const uint16x8_t ph1h = vmull_u8(vget_high_u8(v0_1h), vget_high_u8(v1_1h));
  2290. const uint16x8_t pl_0 = vaddq_u16(pl0l, pl0h);
  2291. const uint16x8_t ph_0 = vaddq_u16(ph0l, ph0h);
  2292. const uint16x8_t pl_1 = vaddq_u16(pl1l, pl1h);
  2293. const uint16x8_t ph_1 = vaddq_u16(ph1l, ph1h);
  2294. const uint16x8_t p_0 = vaddq_u16(pl_0, ph_0);
  2295. const uint16x8_t p_1 = vaddq_u16(pl_1, ph_1);
  2296. sum11 += x0->d*y0->d*vaddvq_u16(p_0);
  2297. sum11 += x1->d*y1->d*vaddvq_u16(p_1);
  2298. #endif
  2299. }
  2300. sumf = QK4_1*sum00 + sum01 + sum10 + sum11;
  2301. #else
  2302. // scalar
  2303. for (int i = 0; i < nb; i++) {
  2304. const float d0 = x[i].d;
  2305. const float d1 = y[i].d;
  2306. const float m0 = x[i].m;
  2307. const float m1 = y[i].m;
  2308. const uint8_t * restrict p0 = x[i].qs;
  2309. const uint8_t * restrict p1 = y[i].qs;
  2310. for (int j = 0; j < QK4_1/2; j++) {
  2311. const uint8_t v0 = p0[j];
  2312. const uint8_t v1 = p1[j];
  2313. const float f0 = d0*(v0 & 0xf) + m0;
  2314. const float f1 = d0*(v0 >> 4) + m0;
  2315. const float f2 = d1*(v1 & 0xf) + m1;
  2316. const float f3 = d1*(v1 >> 4) + m1;
  2317. sumf += f0*f2 + f1*f3;
  2318. }
  2319. }
  2320. #endif
  2321. *s = sumf;
  2322. }
  2323. static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2324. const int nb = n / QK8_0;
  2325. assert(n % QK8_0 == 0);
  2326. assert(nb % 2 == 0);
  2327. const block_q4_0 * restrict x = vx;
  2328. const block_q8_0 * restrict y = vy;
  2329. float sumf = 0.0;
  2330. #if defined(__ARM_NEON)
  2331. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2332. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2333. for (int i = 0; i < nb; i += 2) {
  2334. const block_q4_0 * restrict x0 = &x[i + 0];
  2335. const block_q4_0 * restrict x1 = &x[i + 1];
  2336. const block_q8_0 * restrict y0 = &y[i + 0];
  2337. const block_q8_0 * restrict y1 = &y[i + 1];
  2338. const uint8x16_t m4b = vdupq_n_u8(0xf);
  2339. const int8x16_t s8b = vdupq_n_s8(0x8);
  2340. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2341. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2342. // 4-bit -> 8-bit
  2343. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2344. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2345. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2346. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2347. // sub 8
  2348. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  2349. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  2350. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  2351. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  2352. // load y
  2353. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2354. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2355. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2356. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2357. // interleave
  2358. const int8x16_t v1_0ls = vuzp1q_s8(v1_0l, v1_0h);
  2359. const int8x16_t v1_0hs = vuzp2q_s8(v1_0l, v1_0h);
  2360. const int8x16_t v1_1ls = vuzp1q_s8(v1_1l, v1_1h);
  2361. const int8x16_t v1_1hs = vuzp2q_s8(v1_1l, v1_1h);
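// The nibbles of x come out de-interleaved (vand yields quants 0,2,4,... and
// vshr yields 1,3,5,...), so uzp1/uzp2 gather the even/odd-indexed y values
// respectively to line the two operands up for the dot products below.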
  2362. #if defined(__ARM_FEATURE_DOTPROD)
  2363. // dot product into int32x4_t
  2364. const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls), v0_0hs, v1_0hs);
  2365. const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls), v0_1hs, v1_1hs);
  2366. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), x0->d*y0->d);
  2367. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), x1->d*y1->d);
  2368. #else
  2369. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
  2370. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
  2371. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));
  2372. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs));
  2373. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls));
  2374. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls));
  2375. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs));
  2376. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs));
  2377. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2378. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2379. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2380. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2381. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0->d*y0->d);
  2382. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1->d*y1->d);
  2383. #endif
  2384. }
  2385. sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2386. #elif defined(__AVX2__)
  2387. // Initialize accumulator with zeros
  2388. __m256 acc = _mm256_setzero_ps();
  2389. // Main loop
  2390. for (int i = 0; i < nb; ++i) {
  2391. /* Compute combined scale for the block */
  2392. const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) );
  2393. __m256i bx = bytesFromNibbles(x[i].qs);
  2394. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  2395. const __m256i off = _mm256_set1_epi8( 8 );
  2396. bx = _mm256_sub_epi8( bx, off );
  2397. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2398. // Get absolute values of x vectors
  2399. const __m256i ax = _mm256_sign_epi8(bx, bx);
  2400. // Sign the values of the y vectors
  2401. const __m256i sy = _mm256_sign_epi8(by, bx);
  2402. // Perform multiplication and create 16-bit values
  2403. const __m256i dot = _mm256_maddubs_epi16(ax, sy);
  2404. const __m256i ones = _mm256_set1_epi16(1);
  2405. __m256i xy_q = _mm256_madd_epi16(ones, dot);
2406. /* Convert the vector of 8 int32_t to 8 floats */
  2407. __m256 q = _mm256_cvtepi32_ps( xy_q );
  2408. /* Multiply q with scale and accumulate */
  2409. acc = _mm256_fmadd_ps( d, q, acc );
  2410. }
  2411. // Return horizontal sum of the acc vector
  2412. __m128 res = _mm256_extractf128_ps( acc, 1 );
  2413. res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
  2414. res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
  2415. res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
  2416. sumf = _mm_cvtss_f32( res );
  2417. #elif defined(__AVX__)
  2418. // Initialize accumulator with zeros
  2419. __m256 acc = _mm256_setzero_ps();
  2420. // Main loop
  2421. for (int i = 0; i < nb; ++i) {
  2422. // Compute combined scale for the block
  2423. const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) );
  2424. __m128i i32[2];
  2425. for (int j = 0; j < 2; ++j) {
  2426. // Load 8 bytes, and unpack 4 bit fields into bytes, making 16 bytes
  2427. __m128i bx = bytesFromNibbles( x[i].qs + 8*j );
  2428. __m128i by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16*j));
  2429. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  2430. const __m128i off = _mm_set1_epi8( 8 );
  2431. bx = _mm_sub_epi8( bx, off );
  2432. // Get absolute values of x vectors
  2433. const __m128i ax = _mm_sign_epi8(bx, bx);
  2434. // Sign the values of the y vectors
  2435. const __m128i sy = _mm_sign_epi8(by, bx);
  2436. // Perform multiplication and create 16-bit values
  2437. const __m128i dot = _mm_maddubs_epi16(ax, sy);
  2438. const __m128i ones = _mm_set1_epi16(1);
  2439. i32[j] = _mm_madd_epi16(ones, dot);
  2440. }
  2441. // Convert int32_t to float
  2442. __m256 p = _mm256_cvtepi32_ps( _mm256_set_m128i( i32[0], i32[1] ));
  2443. // Apply the scale, and accumulate
  2444. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  2445. }
  2446. // Return horizontal sum of the acc vector
  2447. __m128 res = _mm256_extractf128_ps( acc, 1 );
  2448. res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
  2449. res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
  2450. res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
  2451. sumf = _mm_cvtss_f32( res );
  2452. #else
  2453. // scalar
  2454. for (int i = 0; i < nb; i++) {
  2455. const float d0 = x[i].d;
  2456. const float d1 = y[i].d;
  2457. const uint8_t * restrict p0 = x[i].qs;
  2458. const int8_t * restrict p1 = y[i].qs;
  2459. int sumi = 0;
  2460. for (int j = 0; j < QK8_0/2; j++) {
  2461. const uint8_t v0 = p0[j];
  2462. const int i0 = (int8_t) (v0 & 0xf) - 8;
  2463. const int i1 = (int8_t) (v0 >> 4) - 8;
  2464. const int i2 = p1[2*j + 0];
  2465. const int i3 = p1[2*j + 1];
  2466. sumi += i0*i2 + i1*i3;
  2467. }
  2468. sumf += d0*d1*sumi;
  2469. }
  2470. #endif
  2471. *s = sumf;
  2472. }
  2473. static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2474. const int nb = n / QK8_0;
  2475. assert(n % QK8_0 == 0);
  2476. assert(nb % 2 == 0);
  2477. assert(QK8_0 == 2*QK4_2);
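// One q8_0 block (QK8_0 values) lines up with two q4_2 blocks, which is why
// x is indexed with 2*i throughout this function.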
  2478. const block_q4_2 * restrict x = vx;
  2479. const block_q8_0 * restrict y = vy;
  2480. float sumf = 0.0;
  2481. #if defined(__ARM_NEON)
  2482. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2483. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2484. for (int i = 0; i < nb; i += 2) {
  2485. const block_q4_2 * restrict x0_0 = &x[2*(i + 0) + 0];
  2486. const block_q4_2 * restrict x0_1 = &x[2*(i + 0) + 1];
  2487. const block_q4_2 * restrict x1_0 = &x[2*(i + 1) + 0];
  2488. const block_q4_2 * restrict x1_1 = &x[2*(i + 1) + 1];
  2489. const block_q8_0 * restrict y0 = &y[i + 0];
  2490. const block_q8_0 * restrict y1 = &y[i + 1];
  2491. const uint8x16_t m4b = vdupq_n_u8(0xf);
  2492. const int8x16_t s8b = vdupq_n_s8(0x8);
  2493. const uint8x16_t v0_0 = vcombine_u8(vld1_u8(x0_0->qs), vld1_u8(x0_1->qs));
  2494. const uint8x16_t v0_1 = vcombine_u8(vld1_u8(x1_0->qs), vld1_u8(x1_1->qs));
  2495. // 4-bit -> 8-bit
  2496. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2497. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2498. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2499. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2500. // sub 8
  2501. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  2502. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  2503. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  2504. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  2505. // interleave
  2506. const int8x16_t v0_0lz = vzip1q_s8(v0_0ls, v0_0hs);
  2507. const int8x16_t v0_0hz = vzip2q_s8(v0_0ls, v0_0hs);
  2508. const int8x16_t v0_1lz = vzip1q_s8(v0_1ls, v0_1hs);
  2509. const int8x16_t v0_1hz = vzip2q_s8(v0_1ls, v0_1hs);
  2510. // load y
  2511. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2512. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2513. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2514. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2515. #if defined(__ARM_FEATURE_DOTPROD)
  2516. sumv0 = vmlaq_n_f32(sumv0, vaddq_f32(
  2517. vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l)), GGML_FP16_TO_FP32(x0_0->d)),
  2518. vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0hz, v1_0h)), GGML_FP16_TO_FP32(x0_1->d))), y0->d);
  2519. sumv1 = vmlaq_n_f32(sumv1, vaddq_f32(
  2520. vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l)), GGML_FP16_TO_FP32(x1_0->d)),
  2521. vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1hz, v1_1h)), GGML_FP16_TO_FP32(x1_1->d))), y1->d);
  2522. #else
  2523. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l));
  2524. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l));
  2525. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h));
  2526. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h));
  2527. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lz), vget_low_s8 (v1_1l));
  2528. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lz), vget_high_s8(v1_1l));
  2529. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hz), vget_low_s8 (v1_1h));
  2530. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hz), vget_high_s8(v1_1h));
  2531. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2532. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2533. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2534. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2535. sumv0 = vmlaq_n_f32(sumv0, vaddq_f32(
  2536. vmulq_n_f32(vcvtq_f32_s32(pl0), GGML_FP16_TO_FP32(x0_0->d)),
  2537. vmulq_n_f32(vcvtq_f32_s32(ph0), GGML_FP16_TO_FP32(x0_1->d))), y0->d);
  2538. sumv1 = vmlaq_n_f32(sumv1, vaddq_f32(
  2539. vmulq_n_f32(vcvtq_f32_s32(pl1), GGML_FP16_TO_FP32(x1_0->d)),
  2540. vmulq_n_f32(vcvtq_f32_s32(ph1), GGML_FP16_TO_FP32(x1_1->d))), y1->d);
  2541. #endif
  2542. }
  2543. sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2544. #else
  2545. // scalar
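// note: each q8_0 block spans two q4_2 sub-blocks: the first QK8_0/2 int8 values of y
// pair with the nibbles of x[2*i + 0], the second half with x[2*i + 1], each scaled by
// its own fp16 delta d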
  2546. for (int i = 0; i < nb; i++) {
  2547. const uint8_t * restrict x0 = x[2*i + 0].qs;
  2548. const uint8_t * restrict x1 = x[2*i + 1].qs;
  2549. const int8_t * restrict y0 = y[i].qs;
  2550. const float d0 = GGML_FP16_TO_FP32(x[2*i + 0].d);
  2551. const float d1 = GGML_FP16_TO_FP32(x[2*i + 1].d);
  2552. int sumi_0 = 0;
  2553. int sumi_1 = 0;
  2554. for (int j = 0; j < QK8_0/4; j++) {
  2555. const uint8_t v0 = x0[j];
  2556. const uint8_t v1 = x1[j];
  2557. const int i0_0 = (int8_t) (v0 & 0xf) - 8;
  2558. const int i1_0 = (int8_t) (v0 >> 4) - 8;
  2559. const int i0_1 = (int8_t) (v1 & 0xf) - 8;
  2560. const int i1_1 = (int8_t) (v1 >> 4) - 8;
  2561. const int i2_0 = y0[2*j + 0];
  2562. const int i3_0 = y0[2*j + 1];
  2563. const int i2_1 = y0[2*(j + QK8_0/4) + 0];
  2564. const int i3_1 = y0[2*(j + QK8_0/4) + 1];
  2565. sumi_0 += i0_0*i2_0 + i1_0*i3_0;
  2566. sumi_1 += i0_1*i2_1 + i1_1*i3_1;
  2567. }
  2568. sumf += (d0 * y[i].d) * sumi_0;
  2569. sumf += (d1 * y[i].d) * sumi_1;
  2570. }
  2571. #endif
  2572. *s = sumf;
  2573. }
  2574. // compute GGML_VEC_DOT_UNROLL dot products at once
  2575. // xs - x row stride in bytes
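// s  - output: one dot product per unrolled row
// y  - the single shared vector that every row of x is dotted against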
  2576. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  2577. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  2578. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  2579. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  2580. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  2581. }
  2582. #if defined(GGML_SIMD)
  2583. const int np = (n & ~(GGML_F16_STEP - 1));
  2584. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  2585. GGML_F16_VEC ax[GGML_F16_ARR];
  2586. GGML_F16_VEC ay[GGML_F16_ARR];
  2587. for (int i = 0; i < np; i += GGML_F16_STEP) {
  2588. for (int j = 0; j < GGML_F16_ARR; j++) {
  2589. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  2590. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  2591. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  2592. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  2593. }
  2594. }
  2595. }
2596. // reduce the partial sums of each unrolled row into sumf[k]
  2597. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  2598. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  2599. }
  2600. // leftovers
  2601. for (int i = np; i < n; ++i) {
  2602. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  2603. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  2604. }
  2605. }
  2606. #else
  2607. for (int i = 0; i < n; ++i) {
  2608. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  2609. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  2610. }
  2611. }
  2612. #endif
  2613. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  2614. s[i] = sumf[i];
  2615. }
  2616. }
  2617. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  2618. #if defined(GGML_SIMD)
  2619. const int np = (n & ~(GGML_F32_STEP - 1));
  2620. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  2621. GGML_F32_VEC ax[GGML_F32_ARR];
  2622. GGML_F32_VEC ay[GGML_F32_ARR];
  2623. for (int i = 0; i < np; i += GGML_F32_STEP) {
  2624. for (int j = 0; j < GGML_F32_ARR; j++) {
  2625. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  2626. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  2627. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  2628. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  2629. }
  2630. }
  2631. // leftovers
  2632. for (int i = np; i < n; ++i) {
  2633. y[i] += x[i]*v;
  2634. }
  2635. #else
  2636. // scalar
  2637. for (int i = 0; i < n; ++i) {
  2638. y[i] += x[i]*v;
  2639. }
  2640. #endif
  2641. }
  2642. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  2643. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  2644. #if defined(GGML_SIMD)
  2645. const int np = (n & ~(GGML_F32_STEP - 1));
  2646. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  2647. GGML_F32_VEC ay[GGML_F32_ARR];
  2648. for (int i = 0; i < np; i += GGML_F32_STEP) {
  2649. for (int j = 0; j < GGML_F32_ARR; j++) {
  2650. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  2651. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  2652. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  2653. }
  2654. }
  2655. // leftovers
  2656. for (int i = np; i < n; ++i) {
  2657. y[i] *= v;
  2658. }
  2659. #else
  2660. // scalar
  2661. for (int i = 0; i < n; ++i) {
  2662. y[i] *= v;
  2663. }
  2664. #endif
  2665. }
  2666. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
  2667. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  2668. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  2669. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  2670. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  2671. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  2672. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
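// GELU via the usual tanh approximation:
//   gelu(x) ≈ 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))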
  2673. static const float GELU_COEF_A = 0.044715f;
  2674. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  2675. inline static float ggml_gelu_f32(float x) {
  2676. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  2677. }
  2678. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  2679. const uint16_t * i16 = (const uint16_t *) x;
  2680. for (int i = 0; i < n; ++i) {
  2681. y[i] = table_gelu_f16[i16[i]];
  2682. }
  2683. }
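// with GGML_GELU_FP16 defined, gelu of f32 inputs goes through the same precomputed
// table: x is rounded to fp16 and its raw bit pattern indexes table_gelu_f16
// (faster, at a small cost in accuracy); otherwise ggml_gelu_f32 is evaluated directly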
  2684. #ifdef GGML_GELU_FP16
  2685. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  2686. uint16_t t;
  2687. for (int i = 0; i < n; ++i) {
  2688. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  2689. memcpy(&t, &fp16, sizeof(uint16_t));
  2690. y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
  2691. }
  2692. }
  2693. #else
  2694. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  2695. for (int i = 0; i < n; ++i) {
  2696. y[i] = ggml_gelu_f32(x[i]);
  2697. }
  2698. }
  2699. #endif
  2700. // Sigmoid Linear Unit (SiLU) function
  2701. inline static float ggml_silu_f32(float x) {
  2702. return x/(1.0f + expf(-x));
  2703. }
  2704. inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  2705. const uint16_t * i16 = (const uint16_t *) x;
  2706. for (int i = 0; i < n; ++i) {
  2707. y[i] = table_silu_f16[i16[i]];
  2708. }
  2709. }
  2710. #ifdef GGML_SILU_FP16
  2711. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  2712. uint16_t t;
  2713. for (int i = 0; i < n; ++i) {
  2714. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  2715. memcpy(&t, &fp16, sizeof(uint16_t));
  2716. y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
  2717. }
  2718. }
  2719. #else
  2720. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  2721. for (int i = 0; i < n; ++i) {
  2722. y[i] = ggml_silu_f32(x[i]);
  2723. }
  2724. }
  2725. #endif
  2726. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  2727. #ifndef GGML_USE_ACCELERATE
  2728. ggml_float sum = 0.0;
  2729. for (int i = 0; i < n; ++i) {
  2730. sum += (ggml_float)x[i];
  2731. }
  2732. *s = sum;
  2733. #else
  2734. vDSP_sve(x, 1, s, n);
  2735. #endif
  2736. }
  2737. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  2738. #ifndef GGML_USE_ACCELERATE
  2739. float max = -INFINITY;
  2740. for (int i = 0; i < n; ++i) {
  2741. max = MAX(max, x[i]);
  2742. }
  2743. *s = max;
  2744. #else
  2745. vDSP_maxv(x, 1, s, n);
  2746. #endif
  2747. }
  2748. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  2749. ggml_vec_norm_f32(n, s, x);
  2750. *s = 1.f/(*s);
  2751. }
  2752. //
  2753. // logging
  2754. //
  2755. #if (GGML_DEBUG >= 1)
  2756. #define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
  2757. #else
  2758. #define GGML_PRINT_DEBUG(...)
  2759. #endif
  2760. #if (GGML_DEBUG >= 5)
  2761. #define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
  2762. #else
  2763. #define GGML_PRINT_DEBUG_5(...)
  2764. #endif
  2765. #if (GGML_DEBUG >= 10)
  2766. #define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
  2767. #else
  2768. #define GGML_PRINT_DEBUG_10(...)
  2769. #endif
  2770. #define GGML_PRINT(...) printf(__VA_ARGS__)
  2771. //
  2772. // data types
  2773. //
  2774. static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = {
  2775. [GGML_TYPE_F32] = 1,
  2776. [GGML_TYPE_F16] = 1,
  2777. [GGML_TYPE_Q4_0] = QK4_0,
  2778. [GGML_TYPE_Q4_1] = QK4_1,
  2779. [GGML_TYPE_Q4_2] = QK4_2,
  2780. [GGML_TYPE_Q8_0] = QK8_0,
  2781. [GGML_TYPE_I8] = 1,
  2782. [GGML_TYPE_I16] = 1,
  2783. [GGML_TYPE_I32] = 1,
  2784. };
  2785. static_assert(GGML_TYPE_COUNT == 9, "GGML_BLCK_SIZE is outdated");
  2786. static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
  2787. [GGML_TYPE_F32] = sizeof(float),
  2788. [GGML_TYPE_F16] = sizeof(ggml_fp16_t),
  2789. [GGML_TYPE_Q4_0] = sizeof(block_q4_0),
  2790. [GGML_TYPE_Q4_1] = sizeof(block_q4_1),
  2791. [GGML_TYPE_Q4_2] = sizeof(block_q4_2),
  2792. [GGML_TYPE_Q8_0] = sizeof(block_q8_0),
  2793. [GGML_TYPE_I8] = sizeof(int8_t),
  2794. [GGML_TYPE_I16] = sizeof(int16_t),
  2795. [GGML_TYPE_I32] = sizeof(int32_t),
  2796. };
  2797. static_assert(GGML_TYPE_COUNT == 9, "GGML_TYPE_SIZE is outdated");
  2798. static const char * GGML_TYPE_NAME[GGML_TYPE_COUNT] = {
  2799. [GGML_TYPE_F32] = "f32",
  2800. [GGML_TYPE_F16] = "f16",
  2801. [GGML_TYPE_Q4_0] = "q4_0",
  2802. [GGML_TYPE_Q4_1] = "q4_1",
  2803. [GGML_TYPE_Q4_2] = "q4_2",
  2804. [GGML_TYPE_Q8_0] = "q8_0",
  2805. [GGML_TYPE_I8] = "i8",
  2806. [GGML_TYPE_I16] = "i16",
  2807. [GGML_TYPE_I32] = "i32",
  2808. };
  2809. static_assert(GGML_TYPE_COUNT == 9, "GGML_TYPE_NAME is outdated");
  2810. static bool GGML_IS_QUANTIZED[GGML_TYPE_COUNT] = {
  2811. [GGML_TYPE_F32] = false,
  2812. [GGML_TYPE_F16] = false,
  2813. [GGML_TYPE_Q4_0] = true,
  2814. [GGML_TYPE_Q4_1] = true,
  2815. [GGML_TYPE_Q4_2] = true,
  2816. [GGML_TYPE_Q8_0] = true,
  2817. [GGML_TYPE_I8] = false,
  2818. [GGML_TYPE_I16] = false,
  2819. [GGML_TYPE_I32] = false,
  2820. };
  2821. static_assert(GGML_TYPE_COUNT == 9, "GGML_IS_QUANTIZED is outdated");
  2822. static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
  2823. "NONE",
  2824. "DUP",
  2825. "ADD",
  2826. "SUB",
  2827. "MUL",
  2828. "DIV",
  2829. "SQR",
  2830. "SQRT",
  2831. "SUM",
  2832. "MEAN",
  2833. "REPEAT",
  2834. "ABS",
  2835. "SGN",
  2836. "NEG",
  2837. "STEP",
  2838. "RELU",
  2839. "GELU",
  2840. "SILU",
  2841. "NORM",
  2842. "RMS_NORM",
  2843. "MUL_MAT",
  2844. "SCALE",
  2845. "CPY",
  2846. "CONT",
  2847. "RESHAPE",
  2848. "VIEW",
  2849. "PERMUTE",
  2850. "TRANSPOSE",
  2851. "GET_ROWS",
  2852. "DIAG_MASK_INF",
  2853. "SOFT_MAX",
  2854. "ROPE",
  2855. "CONV_1D_1S",
  2856. "CONV_1D_2S",
  2857. "FLASH_ATTN",
  2858. "FLASH_FF",
  2859. "MAP_UNARY",
  2860. "MAP_BINARY",
  2861. };
  2862. static_assert(GGML_OP_COUNT == 38, "GGML_OP_COUNT != 38");
  2863. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  2864. "none",
  2865. "x",
  2866. "x+y",
  2867. "x-y",
  2868. "x*y",
  2869. "x/y",
  2870. "x^2",
  2871. "√x",
  2872. "Σx",
  2873. "Σx/n",
  2874. "repeat(x)",
  2875. "abs(x)",
  2876. "sgn(x)",
  2877. "-x",
  2878. "step(x)",
  2879. "relu(x)",
  2880. "gelu(x)",
  2881. "silu(x)",
  2882. "norm(x)",
  2883. "rms_norm(x)",
  2884. "X*Y",
  2885. "x*v",
  2886. "x-\\>y",
  2887. "cont(x)",
  2888. "reshape(x)",
  2889. "view(x)",
  2890. "permute(x)",
  2891. "transpose(x)",
  2892. "get_rows(x)",
  2893. "diag_mask_inf(x)",
  2894. "soft_max(x)",
  2895. "rope(x)",
  2896. "conv_1d_1s(x)",
  2897. "conv_1d_2s(x)",
  2898. "flash_attn(x)",
  2899. "flash_ff(x)",
  2900. "f(x)",
  2901. "f(x,y)",
  2902. };
  2903. static_assert(GGML_OP_COUNT == 38, "GGML_OP_COUNT != 38");
  2904. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  2905. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
  2906. //
  2907. // ggml context
  2908. //
  2909. struct ggml_context {
  2910. size_t mem_size;
  2911. void * mem_buffer;
  2912. bool mem_buffer_owned;
  2913. bool no_alloc;
  2914. int n_objects;
  2915. struct ggml_object * objects_begin;
  2916. struct ggml_object * objects_end;
  2917. struct ggml_scratch scratch;
  2918. struct ggml_scratch scratch_save;
  2919. };
  2920. struct ggml_context_container {
  2921. bool used;
  2922. struct ggml_context context;
  2923. };
  2924. //
  2925. // compute types
  2926. //
  2927. enum ggml_task_type {
  2928. GGML_TASK_INIT = 0,
  2929. GGML_TASK_COMPUTE,
  2930. GGML_TASK_FINALIZE,
  2931. };
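// an op is evaluated in up to three phases: INIT (setup, typically on a single thread),
// COMPUTE (the bulk of the work, split across nth threads identified by ith) and
// FINALIZE (reduction of the per-thread results)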
  2932. struct ggml_compute_params {
  2933. enum ggml_task_type type;
  2934. int ith, nth;
  2935. // work buffer for all threads
  2936. size_t wsize;
  2937. void * wdata;
  2938. };
  2939. //
  2940. // ggml state
  2941. //
  2942. struct ggml_state {
  2943. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  2944. };
  2945. // global state
  2946. static struct ggml_state g_state;
  2947. static atomic_int g_state_barrier = 0;
  2948. // barrier via spin lock
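// each caller bumps the counter; if another thread already holds the section, the
// increment is undone and the thread yields before retrying, so only the thread that
// observes processing == 0 proceeds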
  2949. inline static void ggml_critical_section_start(void) {
  2950. int processing = atomic_fetch_add(&g_state_barrier, 1);
  2951. while (processing > 0) {
  2952. // wait for other threads to finish
  2953. atomic_fetch_sub(&g_state_barrier, 1);
  2954. sched_yield(); // TODO: reconsider this
  2955. processing = atomic_fetch_add(&g_state_barrier, 1);
  2956. }
  2957. }
  2958. // TODO: make this somehow automatically executed
  2959. // some sort of "sentry" mechanism
  2960. inline static void ggml_critical_section_end(void) {
  2961. atomic_fetch_sub(&g_state_barrier, 1);
  2962. }
  2963. ////////////////////////////////////////////////////////////////////////////////
  2964. void ggml_print_object(const struct ggml_object * obj) {
  2965. GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n",
  2966. obj->offs, obj->size, (const void *) obj->next);
  2967. }
  2968. void ggml_print_objects(const struct ggml_context * ctx) {
  2969. struct ggml_object * obj = ctx->objects_begin;
  2970. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  2971. while (obj != NULL) {
  2972. ggml_print_object(obj);
  2973. obj = obj->next;
  2974. }
  2975. GGML_PRINT("%s: --- end ---\n", __func__);
  2976. }
  2977. int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  2978. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2979. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  2980. }
  2981. int ggml_nrows(const struct ggml_tensor * tensor) {
  2982. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2983. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  2984. }
  2985. size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  2986. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2987. return (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type];
  2988. }
  2989. int ggml_blck_size(enum ggml_type type) {
  2990. return GGML_BLCK_SIZE[type];
  2991. }
  2992. size_t ggml_type_size(enum ggml_type type) {
  2993. return GGML_TYPE_SIZE[type];
  2994. }
  2995. float ggml_type_sizef(enum ggml_type type) {
  2996. return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type];
  2997. }
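// ggml_type_sizef gives the average number of bytes per element, e.g. for Q4_0 a block
// of QK4_0 = 32 weights is stored in sizeof(block_q4_0) = 20 bytes (one f32 delta plus
// 16 packed nibbles), i.e. 0.625 bytes per weight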
  2998. const char * ggml_type_name(enum ggml_type type) {
  2999. return GGML_TYPE_NAME[type];
  3000. }
  3001. size_t ggml_element_size(const struct ggml_tensor * tensor) {
  3002. return GGML_TYPE_SIZE[tensor->type];
  3003. }
  3004. static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  3005. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3006. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3007. }
  3008. static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
  3009. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3010. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3011. }
  3012. static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  3013. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3014. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3015. }
  3016. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3017. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3018. return
  3019. (t0->ne[0] == t1->ne[0]) &&
  3020. (t0->ne[2] == t1->ne[2]) &&
  3021. (t0->ne[3] == t1->ne[3]);
  3022. }
  3023. static inline bool ggml_is_quantized(enum ggml_type type) {
  3024. return GGML_IS_QUANTIZED[type];
  3025. }
  3026. static inline bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  3027. return tensor->nb[0] > tensor->nb[1];
  3028. }
  3029. static inline bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  3030. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3031. return
  3032. tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
  3033. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] &&
  3034. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3035. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3036. }
  3037. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  3038. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3039. return
  3040. tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
  3041. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3042. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3043. }
  3044. static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3045. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3046. return
  3047. (t0->ne[0] == t1->ne[0] ) &&
  3048. (t0->ne[1] == t1->ne[1] ) &&
  3049. (t0->ne[2] == t1->ne[2] ) &&
  3050. (t0->ne[3] == t1->ne[3] );
  3051. }
3052. // check if t1 can be represented as a repetition of t0
  3053. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3054. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3055. return
  3056. (t1->ne[0]%t0->ne[0] == 0) &&
  3057. (t1->ne[1]%t0->ne[1] == 0) &&
  3058. (t1->ne[2]%t0->ne[2] == 0) &&
  3059. (t1->ne[3]%t0->ne[3] == 0);
  3060. }
  3061. static inline int ggml_up32(int n) {
  3062. return (n + 31) & ~31;
  3063. }
  3064. static inline int ggml_up64(int n) {
  3065. return (n + 63) & ~63;
  3066. }
  3067. static inline int ggml_up(int n, int m) {
  3068. // assert m is a power of 2
  3069. GGML_ASSERT((m & (m - 1)) == 0);
  3070. return (n + m - 1) & ~(m - 1);
  3071. }
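// e.g. ggml_up32(33) == 64, ggml_up(33, 16) == 48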
  3072. // assert that pointer is aligned to GGML_MEM_ALIGN
  3073. #define ggml_assert_aligned(ptr) \
  3074. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  3075. ////////////////////////////////////////////////////////////////////////////////
  3076. struct ggml_context * ggml_init(struct ggml_init_params params) {
  3077. // make this function thread safe
  3078. ggml_critical_section_start();
  3079. static bool is_first_call = true;
  3080. if (is_first_call) {
  3081. // initialize time system (required on Windows)
  3082. ggml_time_init();
  3083. // initialize GELU, SILU and EXP F32 tables
  3084. {
  3085. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  3086. ggml_fp16_t ii;
  3087. for (int i = 0; i < (1 << 16); ++i) {
  3088. uint16_t ui = i;
  3089. memcpy(&ii, &ui, sizeof(ii));
  3090. const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
  3091. table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  3092. table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  3093. table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  3094. }
  3095. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  3096. GGML_PRINT_DEBUG("%s: GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  3097. }
  3098. // initialize g_state
  3099. {
  3100. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  3101. g_state = (struct ggml_state) {
  3102. /*.contexts =*/ { { 0 } },
  3103. };
  3104. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  3105. g_state.contexts[i].used = false;
  3106. }
  3107. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  3108. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  3109. }
  3110. is_first_call = false;
  3111. }
3112. // find an unused context in g_state
  3113. struct ggml_context * ctx = NULL;
  3114. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  3115. if (!g_state.contexts[i].used) {
  3116. g_state.contexts[i].used = true;
  3117. ctx = &g_state.contexts[i].context;
  3118. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  3119. break;
  3120. }
  3121. }
  3122. if (ctx == NULL) {
  3123. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  3124. ggml_critical_section_end();
  3125. return NULL;
  3126. }
  3127. const size_t mem_size = (params.mem_size + GGML_MEM_ALIGN - 1) & ~(GGML_MEM_ALIGN - 1);
  3128. *ctx = (struct ggml_context) {
  3129. /*.mem_size =*/ mem_size,
  3130. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  3131. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  3132. /*.no_alloc =*/ params.no_alloc,
  3133. /*.n_objects =*/ 0,
  3134. /*.objects_begin =*/ NULL,
  3135. /*.objects_end =*/ NULL,
  3136. /*.scratch =*/ { 0, 0, NULL, },
  3137. /*.scratch_save =*/ { 0, 0, NULL, },
  3138. };
  3139. GGML_ASSERT(ctx->mem_buffer != NULL);
  3140. ggml_assert_aligned(ctx->mem_buffer);
  3141. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  3142. ggml_critical_section_end();
  3143. return ctx;
  3144. }
  3145. void ggml_free(struct ggml_context * ctx) {
  3146. // make this function thread safe
  3147. ggml_critical_section_start();
  3148. bool found = false;
  3149. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  3150. if (&g_state.contexts[i].context == ctx) {
  3151. g_state.contexts[i].used = false;
  3152. GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
  3153. __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);
  3154. if (ctx->mem_buffer_owned) {
  3155. GGML_ALIGNED_FREE(ctx->mem_buffer);
  3156. }
  3157. found = true;
  3158. break;
  3159. }
  3160. }
  3161. if (!found) {
  3162. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  3163. }
  3164. ggml_critical_section_end();
  3165. }
  3166. size_t ggml_used_mem(const struct ggml_context * ctx) {
  3167. return ctx->objects_end->offs + ctx->objects_end->size;
  3168. }
  3169. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  3170. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  3171. ctx->scratch = scratch;
  3172. return result;
  3173. }
  3174. ////////////////////////////////////////////////////////////////////////////////
  3175. struct ggml_tensor * ggml_new_tensor_impl(
  3176. struct ggml_context * ctx,
  3177. enum ggml_type type,
  3178. int n_dims,
  3179. const int64_t* ne,
  3180. void* data) {
  3181. // always insert objects at the end of the context's memory pool
  3182. struct ggml_object * obj_cur = ctx->objects_end;
  3183. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  3184. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  3185. const size_t cur_end = cur_offs + cur_size;
  3186. size_t size_needed = 0;
  3187. if (data == NULL && !ctx->no_alloc) {
  3188. size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]);
  3189. for (int i = 1; i < n_dims; i++) {
  3190. size_needed *= ne[i];
  3191. }
  3192. // align to GGML_MEM_ALIGN
  3193. size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN;
  3194. }
  3195. char * const mem_buffer = ctx->mem_buffer;
  3196. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
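// two allocation paths: without a scratch buffer (or with user-provided data) the tensor
// struct and its data are placed together in the pool; with a scratch buffer only the
// struct goes into the pool and the data is carved out of the scratch region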
  3197. if (ctx->scratch.data == NULL || data != NULL) {
  3198. size_needed += sizeof(struct ggml_tensor);
  3199. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  3200. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  3201. __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
  3202. assert(false);
  3203. return NULL;
  3204. }
  3205. *obj_new = (struct ggml_object) {
  3206. .offs = cur_end + GGML_OBJECT_SIZE,
  3207. .size = size_needed,
  3208. .next = NULL,
  3209. };
  3210. } else {
  3211. if (ctx->scratch.offs + size_needed > ctx->scratch.size) {
  3212. GGML_PRINT("%s: not enough space in the scratch memory\n", __func__);
  3213. assert(false);
  3214. return NULL;
  3215. }
  3216. if (cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE > ctx->mem_size) {
  3217. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  3218. __func__, cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE, ctx->mem_size);
  3219. assert(false);
  3220. return NULL;
  3221. }
  3222. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  3223. *obj_new = (struct ggml_object) {
  3224. .offs = cur_end + GGML_OBJECT_SIZE,
  3225. .size = sizeof(struct ggml_tensor),
  3226. .next = NULL,
  3227. };
  3228. //printf("scratch offs = %zu, size_needed = %zu\n", ctx->scratch.offs, size_needed);
  3229. ctx->scratch.offs += size_needed;
  3230. }
  3231. if (obj_cur != NULL) {
  3232. obj_cur->next = obj_new;
  3233. } else {
  3234. // this is the first object in this context
  3235. ctx->objects_begin = obj_new;
  3236. }
  3237. ctx->objects_end = obj_new;
  3238. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  3239. struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs);
  3240. ggml_assert_aligned(result);
  3241. *result = (struct ggml_tensor) {
  3242. /*.type =*/ type,
  3243. /*.n_dims =*/ n_dims,
  3244. /*.ne =*/ { 1, 1, 1, 1 },
  3245. /*.nb =*/ { 0, 0, 0, 0 },
  3246. /*.op =*/ GGML_OP_NONE,
  3247. /*.is_param =*/ false,
  3248. /*.grad =*/ NULL,
  3249. /*.src0 =*/ NULL,
  3250. /*.src1 =*/ NULL,
  3251. /*.opt =*/ { NULL },
  3252. /*.n_tasks =*/ 0,
  3253. /*.perf_runs =*/ 0,
  3254. /*.perf_cycles =*/ 0,
  3255. /*.perf_time_us =*/ 0,
  3256. /*.data =*/ (data == NULL && !ctx->no_alloc) ? (void *)(result + 1) : data,
  3257. /*.pad =*/ { 0 },
  3258. };
  3259. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  3260. //ggml_assert_aligned(result->data);
  3261. for (int i = 0; i < n_dims; i++) {
  3262. result->ne[i] = ne[i];
  3263. }
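// nb[] holds the strides in bytes; for quantized types nb[1] accounts for the fact that
// ne[0] elements occupy ne[0]/GGML_BLCK_SIZE[type] blocks of GGML_TYPE_SIZE[type] bytes each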
  3264. result->nb[0] = GGML_TYPE_SIZE[type];
  3265. result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]);
  3266. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  3267. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  3268. }
  3269. ctx->n_objects++;
  3270. return result;
  3271. }
  3272. struct ggml_tensor * ggml_new_tensor(
  3273. struct ggml_context * ctx,
  3274. enum ggml_type type,
  3275. int n_dims,
  3276. const int64_t * ne) {
  3277. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
  3278. }
  3279. struct ggml_tensor * ggml_new_tensor_1d(
  3280. struct ggml_context * ctx,
  3281. enum ggml_type type,
  3282. int64_t ne0) {
  3283. return ggml_new_tensor(ctx, type, 1, &ne0);
  3284. }
  3285. struct ggml_tensor * ggml_new_tensor_2d(
  3286. struct ggml_context * ctx,
  3287. enum ggml_type type,
  3288. int64_t ne0,
  3289. int64_t ne1) {
  3290. const int64_t ne[2] = { ne0, ne1 };
  3291. return ggml_new_tensor(ctx, type, 2, ne);
  3292. }
  3293. struct ggml_tensor * ggml_new_tensor_3d(
  3294. struct ggml_context * ctx,
  3295. enum ggml_type type,
  3296. int64_t ne0,
  3297. int64_t ne1,
  3298. int64_t ne2) {
  3299. const int64_t ne[3] = { ne0, ne1, ne2 };
  3300. return ggml_new_tensor(ctx, type, 3, ne);
  3301. }
  3302. struct ggml_tensor * ggml_new_tensor_4d(
  3303. struct ggml_context * ctx,
  3304. enum ggml_type type,
  3305. int64_t ne0,
  3306. int64_t ne1,
  3307. int64_t ne2,
  3308. int64_t ne3) {
  3309. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  3310. return ggml_new_tensor(ctx, type, 4, ne);
  3311. }
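// scalar constants are allocated with the scratch buffer temporarily disabled so that
// their data always lives in the context's own memory pool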
  3312. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  3313. ctx->scratch_save = ctx->scratch;
  3314. ctx->scratch.data = NULL;
  3315. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  3316. ctx->scratch = ctx->scratch_save;
  3317. ggml_set_i32(result, value);
  3318. return result;
  3319. }
  3320. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  3321. ctx->scratch_save = ctx->scratch;
  3322. ctx->scratch.data = NULL;
  3323. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  3324. ctx->scratch = ctx->scratch_save;
  3325. ggml_set_f32(result, value);
  3326. return result;
  3327. }
  3328. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  3329. return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL);
  3330. }
  3331. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  3332. memset(tensor->data, 0, ggml_nbytes(tensor));
  3333. return tensor;
  3334. }
  3335. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  3336. const int n = ggml_nrows(tensor);
  3337. const int nc = tensor->ne[0];
  3338. const size_t n1 = tensor->nb[1];
  3339. char * const data = tensor->data;
  3340. switch (tensor->type) {
  3341. case GGML_TYPE_I8:
  3342. {
  3343. assert(tensor->nb[0] == sizeof(int8_t));
  3344. for (int i = 0; i < n; i++) {
  3345. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  3346. }
  3347. } break;
  3348. case GGML_TYPE_I16:
  3349. {
  3350. assert(tensor->nb[0] == sizeof(int16_t));
  3351. for (int i = 0; i < n; i++) {
  3352. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  3353. }
  3354. } break;
  3355. case GGML_TYPE_I32:
  3356. {
  3357. assert(tensor->nb[0] == sizeof(int32_t));
  3358. for (int i = 0; i < n; i++) {
  3359. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  3360. }
  3361. } break;
  3362. case GGML_TYPE_F16:
  3363. {
  3364. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  3365. for (int i = 0; i < n; i++) {
  3366. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value);
  3367. }
  3368. } break;
  3369. case GGML_TYPE_F32:
  3370. {
  3371. assert(tensor->nb[0] == sizeof(float));
  3372. for (int i = 0; i < n; i++) {
  3373. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  3374. }
  3375. } break;
  3376. default:
  3377. {
  3378. GGML_ASSERT(false);
  3379. } break;
  3380. }
  3381. return tensor;
  3382. }
  3383. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  3384. const int n = ggml_nrows(tensor);
  3385. const int nc = tensor->ne[0];
  3386. const size_t n1 = tensor->nb[1];
  3387. char * const data = tensor->data;
  3388. switch (tensor->type) {
  3389. case GGML_TYPE_I8:
  3390. {
  3391. assert(tensor->nb[0] == sizeof(int8_t));
  3392. for (int i = 0; i < n; i++) {
  3393. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  3394. }
  3395. } break;
  3396. case GGML_TYPE_I16:
  3397. {
  3398. assert(tensor->nb[0] == sizeof(int16_t));
  3399. for (int i = 0; i < n; i++) {
  3400. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  3401. }
  3402. } break;
  3403. case GGML_TYPE_I32:
  3404. {
  3405. assert(tensor->nb[0] == sizeof(int32_t));
  3406. for (int i = 0; i < n; i++) {
  3407. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  3408. }
  3409. } break;
  3410. case GGML_TYPE_F16:
  3411. {
  3412. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  3413. for (int i = 0; i < n; i++) {
  3414. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value);
  3415. }
  3416. } break;
  3417. case GGML_TYPE_F32:
  3418. {
  3419. assert(tensor->nb[0] == sizeof(float));
  3420. for (int i = 0; i < n; i++) {
  3421. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  3422. }
  3423. } break;
  3424. default:
  3425. {
  3426. GGML_ASSERT(false);
  3427. } break;
  3428. }
  3429. return tensor;
  3430. }
  3431. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  3432. switch (tensor->type) {
  3433. case GGML_TYPE_I8:
  3434. {
  3435. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  3436. return ((int8_t *)(tensor->data))[i];
  3437. } break;
  3438. case GGML_TYPE_I16:
  3439. {
  3440. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3441. return ((int16_t *)(tensor->data))[i];
  3442. } break;
  3443. case GGML_TYPE_I32:
  3444. {
  3445. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3446. return ((int32_t *)(tensor->data))[i];
  3447. } break;
  3448. case GGML_TYPE_F16:
  3449. {
  3450. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3451. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  3452. } break;
  3453. case GGML_TYPE_F32:
  3454. {
  3455. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3456. return ((float *)(tensor->data))[i];
  3457. } break;
  3458. default:
  3459. {
  3460. GGML_ASSERT(false);
  3461. } break;
  3462. }
  3463. return 0.0f;
  3464. }
  3465. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  3466. switch (tensor->type) {
  3467. case GGML_TYPE_I8:
  3468. {
  3469. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  3470. ((int8_t *)(tensor->data))[i] = value;
  3471. } break;
  3472. case GGML_TYPE_I16:
  3473. {
  3474. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3475. ((int16_t *)(tensor->data))[i] = value;
  3476. } break;
  3477. case GGML_TYPE_I32:
  3478. {
  3479. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3480. ((int32_t *)(tensor->data))[i] = value;
  3481. } break;
  3482. case GGML_TYPE_F16:
  3483. {
  3484. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3485. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  3486. } break;
  3487. case GGML_TYPE_F32:
  3488. {
  3489. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3490. ((float *)(tensor->data))[i] = value;
  3491. } break;
  3492. default:
  3493. {
  3494. GGML_ASSERT(false);
  3495. } break;
  3496. }
  3497. }
  3498. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  3499. switch (tensor->type) {
  3500. case GGML_TYPE_I8:
  3501. {
  3502. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  3503. return ((int8_t *)(tensor->data))[i];
  3504. } break;
  3505. case GGML_TYPE_I16:
  3506. {
  3507. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3508. return ((int16_t *)(tensor->data))[i];
  3509. } break;
  3510. case GGML_TYPE_I32:
  3511. {
  3512. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3513. return ((int32_t *)(tensor->data))[i];
  3514. } break;
  3515. case GGML_TYPE_F16:
  3516. {
  3517. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3518. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  3519. } break;
  3520. case GGML_TYPE_F32:
  3521. {
  3522. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3523. return ((float *)(tensor->data))[i];
  3524. } break;
  3525. default:
  3526. {
  3527. GGML_ASSERT(false);
  3528. } break;
  3529. }
  3530. return 0.0f;
  3531. }
  3532. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  3533. switch (tensor->type) {
  3534. case GGML_TYPE_I8:
  3535. {
  3536. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  3537. ((int8_t *)(tensor->data))[i] = value;
  3538. } break;
  3539. case GGML_TYPE_I16:
  3540. {
  3541. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3542. ((int16_t *)(tensor->data))[i] = value;
  3543. } break;
  3544. case GGML_TYPE_I32:
  3545. {
  3546. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3547. ((int32_t *)(tensor->data))[i] = value;
  3548. } break;
  3549. case GGML_TYPE_F16:
  3550. {
  3551. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3552. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  3553. } break;
  3554. case GGML_TYPE_F32:
  3555. {
  3556. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3557. ((float *)(tensor->data))[i] = value;
  3558. } break;
  3559. default:
  3560. {
  3561. GGML_ASSERT(false);
  3562. } break;
  3563. }
  3564. }
  3565. void * ggml_get_data(const struct ggml_tensor * tensor) {
  3566. return tensor->data;
  3567. }
  3568. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  3569. assert(tensor->type == GGML_TYPE_F32);
  3570. return (float *)(tensor->data);
  3571. }
  3572. struct ggml_tensor * ggml_view_tensor(
  3573. struct ggml_context * ctx,
  3574. const struct ggml_tensor * src) {
  3575. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data);
  3576. result->nb[0] = src->nb[0];
  3577. result->nb[1] = src->nb[1];
  3578. result->nb[2] = src->nb[2];
  3579. result->nb[3] = src->nb[3];
  3580. return result;
  3581. }
  3582. ////////////////////////////////////////////////////////////////////////////////
  3583. // ggml_dup
  3584. struct ggml_tensor * ggml_dup_impl(
  3585. struct ggml_context * ctx,
  3586. struct ggml_tensor * a,
  3587. bool inplace) {
  3588. bool is_node = false;
  3589. if (!inplace && (a->grad)) {
  3590. is_node = true;
  3591. }
  3592. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3593. result->op = GGML_OP_DUP;
  3594. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3595. result->src0 = a;
  3596. result->src1 = NULL;
  3597. return result;
  3598. }
  3599. struct ggml_tensor * ggml_dup(
  3600. struct ggml_context * ctx,
  3601. struct ggml_tensor * a) {
  3602. return ggml_dup_impl(ctx, a, false);
  3603. }
  3604. struct ggml_tensor * ggml_dup_inplace(
  3605. struct ggml_context * ctx,
  3606. struct ggml_tensor * a) {
  3607. return ggml_dup_impl(ctx, a, true);
  3608. }
  3609. // ggml_add
  3610. struct ggml_tensor * ggml_add_impl(
  3611. struct ggml_context * ctx,
  3612. struct ggml_tensor * a,
  3613. struct ggml_tensor * b,
  3614. bool inplace) {
  3615. GGML_ASSERT(ggml_are_same_shape(a, b));
  3616. bool is_node = false;
  3617. if (!inplace && (a->grad || b->grad)) {
  3618. is_node = true;
  3619. }
  3620. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3621. result->op = GGML_OP_ADD;
  3622. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3623. result->src0 = a;
  3624. result->src1 = b;
  3625. return result;
  3626. }
  3627. struct ggml_tensor * ggml_add(
  3628. struct ggml_context * ctx,
  3629. struct ggml_tensor * a,
  3630. struct ggml_tensor * b) {
  3631. return ggml_add_impl(ctx, a, b, false);
  3632. }
  3633. struct ggml_tensor * ggml_add_inplace(
  3634. struct ggml_context * ctx,
  3635. struct ggml_tensor * a,
  3636. struct ggml_tensor * b) {
  3637. return ggml_add_impl(ctx, a, b, true);
  3638. }
  3639. // ggml_sub
  3640. struct ggml_tensor * ggml_sub_impl(
  3641. struct ggml_context * ctx,
  3642. struct ggml_tensor * a,
  3643. struct ggml_tensor * b,
  3644. bool inplace) {
  3645. GGML_ASSERT(ggml_are_same_shape(a, b));
  3646. bool is_node = false;
  3647. if (!inplace && (a->grad || b->grad)) {
  3648. is_node = true;
  3649. }
  3650. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3651. result->op = GGML_OP_SUB;
  3652. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3653. result->src0 = a;
  3654. result->src1 = b;
  3655. return result;
  3656. }
  3657. struct ggml_tensor * ggml_sub(
  3658. struct ggml_context * ctx,
  3659. struct ggml_tensor * a,
  3660. struct ggml_tensor * b) {
  3661. return ggml_sub_impl(ctx, a, b, false);
  3662. }
  3663. struct ggml_tensor * ggml_sub_inplace(
  3664. struct ggml_context * ctx,
  3665. struct ggml_tensor * a,
  3666. struct ggml_tensor * b) {
  3667. return ggml_sub_impl(ctx, a, b, true);
  3668. }
  3669. // ggml_mul
  3670. struct ggml_tensor * ggml_mul_impl(
  3671. struct ggml_context * ctx,
  3672. struct ggml_tensor * a,
  3673. struct ggml_tensor * b,
  3674. bool inplace) {
  3675. GGML_ASSERT(ggml_are_same_shape(a, b));
  3676. bool is_node = false;
  3677. if (!inplace && (a->grad || b->grad)) {
  3678. is_node = true;
  3679. }
  3680. if (inplace) {
  3681. GGML_ASSERT(is_node == false);
  3682. }
  3683. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3684. result->op = GGML_OP_MUL;
  3685. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3686. result->src0 = a;
  3687. result->src1 = b;
  3688. return result;
  3689. }
  3690. struct ggml_tensor * ggml_mul(
  3691. struct ggml_context * ctx,
  3692. struct ggml_tensor * a,
  3693. struct ggml_tensor * b) {
  3694. return ggml_mul_impl(ctx, a, b, false);
  3695. }
  3696. struct ggml_tensor * ggml_mul_inplace(
  3697. struct ggml_context * ctx,
  3698. struct ggml_tensor * a,
  3699. struct ggml_tensor * b) {
  3700. return ggml_mul_impl(ctx, a, b, true);
  3701. }
  3702. // ggml_div
  3703. struct ggml_tensor * ggml_div_impl(
  3704. struct ggml_context * ctx,
  3705. struct ggml_tensor * a,
  3706. struct ggml_tensor * b,
  3707. bool inplace) {
  3708. GGML_ASSERT(ggml_are_same_shape(a, b));
  3709. bool is_node = false;
  3710. if (!inplace && (a->grad || b->grad)) {
  3711. is_node = true;
  3712. }
  3713. if (inplace) {
  3714. GGML_ASSERT(is_node == false);
  3715. }
  3716. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3717. result->op = GGML_OP_DIV;
  3718. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3719. result->src0 = a;
  3720. result->src1 = b;
  3721. return result;
  3722. }
  3723. struct ggml_tensor * ggml_div(
  3724. struct ggml_context * ctx,
  3725. struct ggml_tensor * a,
  3726. struct ggml_tensor * b) {
  3727. return ggml_div_impl(ctx, a, b, false);
  3728. }
  3729. struct ggml_tensor * ggml_div_inplace(
  3730. struct ggml_context * ctx,
  3731. struct ggml_tensor * a,
  3732. struct ggml_tensor * b) {
  3733. return ggml_div_impl(ctx, a, b, true);
  3734. }
  3735. // ggml_sqr
  3736. struct ggml_tensor * ggml_sqr_impl(
  3737. struct ggml_context * ctx,
  3738. struct ggml_tensor * a,
  3739. bool inplace) {
  3740. bool is_node = false;
  3741. if (!inplace && (a->grad)) {
  3742. is_node = true;
  3743. }
  3744. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3745. result->op = GGML_OP_SQR;
  3746. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3747. result->src0 = a;
  3748. result->src1 = NULL;
  3749. return result;
  3750. }
  3751. struct ggml_tensor * ggml_sqr(
  3752. struct ggml_context * ctx,
  3753. struct ggml_tensor * a) {
  3754. return ggml_sqr_impl(ctx, a, false);
  3755. }
  3756. struct ggml_tensor * ggml_sqr_inplace(
  3757. struct ggml_context * ctx,
  3758. struct ggml_tensor * a) {
  3759. return ggml_sqr_impl(ctx, a, true);
  3760. }
  3761. // ggml_sqrt
  3762. struct ggml_tensor * ggml_sqrt_impl(
  3763. struct ggml_context * ctx,
  3764. struct ggml_tensor * a,
  3765. bool inplace) {
  3766. bool is_node = false;
  3767. if (!inplace && (a->grad)) {
  3768. is_node = true;
  3769. }
  3770. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3771. result->op = GGML_OP_SQRT;
  3772. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3773. result->src0 = a;
  3774. result->src1 = NULL;
  3775. return result;
  3776. }
  3777. struct ggml_tensor * ggml_sqrt(
  3778. struct ggml_context * ctx,
  3779. struct ggml_tensor * a) {
  3780. return ggml_sqrt_impl(ctx, a, false);
  3781. }
  3782. struct ggml_tensor * ggml_sqrt_inplace(
  3783. struct ggml_context * ctx,
  3784. struct ggml_tensor * a) {
  3785. return ggml_sqrt_impl(ctx, a, true);
  3786. }
  3787. // ggml_sum
  3788. struct ggml_tensor * ggml_sum(
  3789. struct ggml_context * ctx,
  3790. struct ggml_tensor * a) {
  3791. bool is_node = false;
  3792. if (a->grad) {
  3793. is_node = true;
  3794. }
  3795. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  3796. result->op = GGML_OP_SUM;
  3797. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3798. result->src0 = a;
  3799. result->src1 = NULL;
  3800. return result;
  3801. }
  3802. // ggml_mean
  3803. struct ggml_tensor * ggml_mean(
  3804. struct ggml_context * ctx,
  3805. struct ggml_tensor * a) {
  3806. bool is_node = false;
  3807. if (a->grad) {
  3808. GGML_ASSERT(false); // TODO: implement
  3809. is_node = true;
  3810. }
  3811. int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  3812. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);
  3813. result->op = GGML_OP_MEAN;
  3814. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3815. result->src0 = a;
  3816. result->src1 = NULL;
  3817. return result;
  3818. }
  3819. // ggml_repeat
  3820. struct ggml_tensor * ggml_repeat(
  3821. struct ggml_context * ctx,
  3822. struct ggml_tensor * a,
  3823. struct ggml_tensor * b) {
  3824. GGML_ASSERT(ggml_can_repeat(a, b));
  3825. bool is_node = false;
  3826. if (a->grad) {
  3827. is_node = true;
  3828. }
  3829. if (ggml_are_same_shape(a, b) && !is_node) {
  3830. return a;
  3831. }
  3832. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);
  3833. result->op = GGML_OP_REPEAT;
  3834. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3835. result->src0 = a;
  3836. result->src1 = b;
  3837. return result;
  3838. }
  3839. // ggml_abs
  3840. struct ggml_tensor * ggml_abs_impl(
  3841. struct ggml_context * ctx,
  3842. struct ggml_tensor * a,
  3843. bool inplace) {
  3844. bool is_node = false;
  3845. if (!inplace && (a->grad)) {
  3846. is_node = true;
  3847. }
  3848. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3849. result->op = GGML_OP_ABS;
  3850. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3851. result->src0 = a;
  3852. result->src1 = NULL;
  3853. return result;
  3854. }
  3855. struct ggml_tensor * ggml_abs(
  3856. struct ggml_context * ctx,
  3857. struct ggml_tensor * a) {
  3858. return ggml_abs_impl(ctx, a, false);
  3859. }
  3860. struct ggml_tensor * ggml_abs_inplace(
  3861. struct ggml_context * ctx,
  3862. struct ggml_tensor * a) {
  3863. return ggml_abs_impl(ctx, a, true);
  3864. }
  3865. // ggml_sgn
  3866. struct ggml_tensor * ggml_sgn_impl(
  3867. struct ggml_context * ctx,
  3868. struct ggml_tensor * a,
  3869. bool inplace) {
  3870. bool is_node = false;
  3871. if (!inplace && (a->grad)) {
  3872. is_node = true;
  3873. }
  3874. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3875. result->op = GGML_OP_SGN;
  3876. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3877. result->src0 = a;
  3878. result->src1 = NULL;
  3879. return result;
  3880. }
  3881. struct ggml_tensor * ggml_sgn(
  3882. struct ggml_context * ctx,
  3883. struct ggml_tensor * a) {
  3884. return ggml_sgn_impl(ctx, a, false);
  3885. }
  3886. struct ggml_tensor * ggml_sgn_inplace(
  3887. struct ggml_context * ctx,
  3888. struct ggml_tensor * a) {
  3889. return ggml_sgn_impl(ctx, a, true);
  3890. }
  3891. // ggml_neg
  3892. struct ggml_tensor * ggml_neg_impl(
  3893. struct ggml_context * ctx,
  3894. struct ggml_tensor * a,
  3895. bool inplace) {
  3896. bool is_node = false;
  3897. if (!inplace && (a->grad)) {
  3898. is_node = true;
  3899. }
  3900. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3901. result->op = GGML_OP_NEG;
  3902. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3903. result->src0 = a;
  3904. result->src1 = NULL;
  3905. return result;
  3906. }
  3907. struct ggml_tensor * ggml_neg(
  3908. struct ggml_context * ctx,
  3909. struct ggml_tensor * a) {
  3910. return ggml_neg_impl(ctx, a, false);
  3911. }
  3912. struct ggml_tensor * ggml_neg_inplace(
  3913. struct ggml_context * ctx,
  3914. struct ggml_tensor * a) {
  3915. return ggml_neg_impl(ctx, a, true);
  3916. }
  3917. // ggml_step
  3918. struct ggml_tensor * ggml_step_impl(
  3919. struct ggml_context * ctx,
  3920. struct ggml_tensor * a,
  3921. bool inplace) {
  3922. bool is_node = false;
  3923. if (!inplace && (a->grad)) {
  3924. is_node = true;
  3925. }
  3926. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3927. result->op = GGML_OP_STEP;
  3928. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3929. result->src0 = a;
  3930. result->src1 = NULL;
  3931. return result;
  3932. }
  3933. struct ggml_tensor * ggml_step(
  3934. struct ggml_context * ctx,
  3935. struct ggml_tensor * a) {
  3936. return ggml_step_impl(ctx, a, false);
  3937. }
  3938. struct ggml_tensor * ggml_step_inplace(
  3939. struct ggml_context * ctx,
  3940. struct ggml_tensor * a) {
  3941. return ggml_step_impl(ctx, a, true);
  3942. }
  3943. // ggml_relu
  3944. struct ggml_tensor * ggml_relu_impl(
  3945. struct ggml_context * ctx,
  3946. struct ggml_tensor * a,
  3947. bool inplace) {
  3948. bool is_node = false;
  3949. if (!inplace && (a->grad)) {
  3950. is_node = true;
  3951. }
  3952. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3953. result->op = GGML_OP_RELU;
  3954. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3955. result->src0 = a;
  3956. result->src1 = NULL;
  3957. return result;
  3958. }
  3959. struct ggml_tensor * ggml_relu(
  3960. struct ggml_context * ctx,
  3961. struct ggml_tensor * a) {
  3962. return ggml_relu_impl(ctx, a, false);
  3963. }
  3964. struct ggml_tensor * ggml_relu_inplace(
  3965. struct ggml_context * ctx,
  3966. struct ggml_tensor * a) {
  3967. return ggml_relu_impl(ctx, a, true);
  3968. }
  3969. // ggml_gelu
  3970. struct ggml_tensor * ggml_gelu_impl(
  3971. struct ggml_context * ctx,
  3972. struct ggml_tensor * a,
  3973. bool inplace) {
  3974. bool is_node = false;
  3975. if (!inplace && (a->grad)) {
  3976. is_node = true;
  3977. }
  3978. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3979. result->op = GGML_OP_GELU;
  3980. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3981. result->src0 = a;
  3982. result->src1 = NULL;
  3983. return result;
  3984. }
  3985. struct ggml_tensor * ggml_gelu(
  3986. struct ggml_context * ctx,
  3987. struct ggml_tensor * a) {
  3988. return ggml_gelu_impl(ctx, a, false);
  3989. }
  3990. struct ggml_tensor * ggml_gelu_inplace(
  3991. struct ggml_context * ctx,
  3992. struct ggml_tensor * a) {
  3993. return ggml_gelu_impl(ctx, a, true);
  3994. }
  3995. // ggml_silu
  3996. struct ggml_tensor * ggml_silu_impl(
  3997. struct ggml_context * ctx,
  3998. struct ggml_tensor * a,
  3999. bool inplace) {
  4000. bool is_node = false;
  4001. if (!inplace && (a->grad)) {
  4002. is_node = true;
  4003. }
  4004. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4005. result->op = GGML_OP_SILU;
  4006. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4007. result->src0 = a;
  4008. result->src1 = NULL;
  4009. return result;
  4010. }
  4011. struct ggml_tensor * ggml_silu(
  4012. struct ggml_context * ctx,
  4013. struct ggml_tensor * a) {
  4014. return ggml_silu_impl(ctx, a, false);
  4015. }
  4016. struct ggml_tensor * ggml_silu_inplace(
  4017. struct ggml_context * ctx,
  4018. struct ggml_tensor * a) {
  4019. return ggml_silu_impl(ctx, a, true);
  4020. }
  4021. // ggml_norm
  4022. struct ggml_tensor * ggml_norm_impl(
  4023. struct ggml_context * ctx,
  4024. struct ggml_tensor * a,
  4025. bool inplace) {
  4026. bool is_node = false;
  4027. if (!inplace && (a->grad)) {
  4028. GGML_ASSERT(false); // TODO: implement backward
  4029. is_node = true;
  4030. }
  4031. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4032. result->op = GGML_OP_NORM;
  4033. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4034. result->src0 = a;
  4035. result->src1 = NULL; // TODO: maybe store epsilon here?
  4036. return result;
  4037. }
  4038. struct ggml_tensor * ggml_norm(
  4039. struct ggml_context * ctx,
  4040. struct ggml_tensor * a) {
  4041. return ggml_norm_impl(ctx, a, false);
  4042. }
  4043. struct ggml_tensor * ggml_norm_inplace(
  4044. struct ggml_context * ctx,
  4045. struct ggml_tensor * a) {
  4046. return ggml_norm_impl(ctx, a, true);
  4047. }
  4048. struct ggml_tensor * ggml_rms_norm_impl(
  4049. struct ggml_context * ctx,
  4050. struct ggml_tensor * a,
  4051. bool inplace) {
  4052. bool is_node = false;
  4053. if (!inplace && (a->grad)) {
  4054. GGML_ASSERT(false); // TODO: implement backward
  4055. is_node = true;
  4056. }
  4057. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4058. result->op = GGML_OP_RMS_NORM;
  4059. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4060. result->src0 = a;
  4061. result->src1 = NULL; // TODO: maybe store epsilon here?
  4062. return result;
  4063. }
  4064. struct ggml_tensor * ggml_rms_norm(
  4065. struct ggml_context * ctx,
  4066. struct ggml_tensor * a) {
  4067. return ggml_rms_norm_impl(ctx, a, false);
  4068. }
  4069. struct ggml_tensor * ggml_rms_norm_inplace(
  4070. struct ggml_context * ctx,
  4071. struct ggml_tensor * a) {
  4072. return ggml_rms_norm_impl(ctx, a, true);
  4073. }
  4074. // ggml_mul_mat
  4075. struct ggml_tensor * ggml_mul_mat(
  4076. struct ggml_context * ctx,
  4077. struct ggml_tensor * a,
  4078. struct ggml_tensor * b) {
  4079. GGML_ASSERT(ggml_can_mul_mat(a, b));
  4080. GGML_ASSERT(!ggml_is_transposed(a));
  4081. bool is_node = false;
  4082. if (a->grad || b->grad) {
  4083. is_node = true;
  4084. }
  4085. const int64_t ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] };
  4086. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);
  4087. result->op = GGML_OP_MUL_MAT;
  4088. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4089. result->src0 = a;
  4090. result->src1 = b;
  4091. return result;
  4092. }
  4093. // ggml_scale
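//
// note: until backward is implemented, the result is always a view of a (see the
// commented-out line below), so scaling effectively happens in place at compute time.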
  4094. struct ggml_tensor * ggml_scale_impl(
  4095. struct ggml_context * ctx,
  4096. struct ggml_tensor * a,
  4097. struct ggml_tensor * b,
  4098. bool inplace) {
  4099. GGML_ASSERT(ggml_is_scalar(b));
  4100. GGML_ASSERT(ggml_is_padded_1d(a));
  4101. bool is_node = false;
  4102. if (!inplace && (a->grad || b->grad)) {
  4103. GGML_ASSERT(false); // TODO: implement backward
  4104. is_node = true;
  4105. }
// TODO: when backward is implemented, fix this:
  4107. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4108. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4109. result->op = GGML_OP_SCALE;
  4110. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4111. result->src0 = a;
  4112. result->src1 = b;
  4113. return result;
  4114. }
  4115. struct ggml_tensor * ggml_scale(
  4116. struct ggml_context * ctx,
  4117. struct ggml_tensor * a,
  4118. struct ggml_tensor * b) {
  4119. return ggml_scale_impl(ctx, a, b, false);
  4120. }
  4121. struct ggml_tensor * ggml_scale_inplace(
  4122. struct ggml_context * ctx,
  4123. struct ggml_tensor * a,
  4124. struct ggml_tensor * b) {
  4125. return ggml_scale_impl(ctx, a, b, true);
  4126. }
  4127. // ggml_cpy
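//
// the returned node is a view of b (the destination); when the graph is computed,
// the contents of a are copied/converted into b's memory.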
  4128. struct ggml_tensor * ggml_cpy_impl(
  4129. struct ggml_context * ctx,
  4130. struct ggml_tensor * a,
  4131. struct ggml_tensor * b,
  4132. bool inplace) {
  4133. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  4134. bool is_node = false;
  4135. if (!inplace && (a->grad || b->grad)) {
  4136. GGML_ASSERT(false); // TODO: implement backward
  4137. is_node = true;
  4138. }
  4139. // make a view of the destination
  4140. struct ggml_tensor * result = ggml_view_tensor(ctx, b);
  4141. result->op = GGML_OP_CPY;
  4142. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4143. result->src0 = a;
  4144. result->src1 = b;
  4145. return result;
  4146. }
  4147. struct ggml_tensor * ggml_cpy(
  4148. struct ggml_context * ctx,
  4149. struct ggml_tensor * a,
  4150. struct ggml_tensor * b) {
  4151. return ggml_cpy_impl(ctx, a, b, false);
  4152. }
  4153. struct ggml_tensor * ggml_cpy_inplace(
  4154. struct ggml_context * ctx,
  4155. struct ggml_tensor * a,
  4156. struct ggml_tensor * b) {
  4157. return ggml_cpy_impl(ctx, a, b, true);
  4158. }
  4159. // ggml_cont
  4160. struct ggml_tensor * ggml_cont_impl(
  4161. struct ggml_context * ctx,
  4162. struct ggml_tensor * a,
  4163. bool inplace) {
  4164. bool is_node = false;
  4165. if (!inplace && a->grad) {
  4166. GGML_ASSERT(false); // TODO: implement backward
  4167. is_node = true;
  4168. }
  4169. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4170. result->op = GGML_OP_CONT;
  4171. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4172. result->src0 = a;
  4173. result->src1 = NULL;
  4174. return result;
  4175. }
  4176. struct ggml_tensor * ggml_cont(
  4177. struct ggml_context * ctx,
  4178. struct ggml_tensor * a) {
  4179. return ggml_cont_impl(ctx, a, false);
  4180. }
  4181. struct ggml_tensor * ggml_cont_inplace(
  4182. struct ggml_context * ctx,
  4183. struct ggml_tensor * a) {
  4184. return ggml_cont_impl(ctx, a, true);
  4185. }
  4186. // ggml_reshape
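//
// reshape requires contiguous tensors and does not copy any data: the result tensor
// aliases a->data directly with the new dimensions.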
  4187. struct ggml_tensor * ggml_reshape(
  4188. struct ggml_context * ctx,
  4189. struct ggml_tensor * a,
  4190. struct ggml_tensor * b) {
  4191. GGML_ASSERT(ggml_is_contiguous(a));
  4192. GGML_ASSERT(ggml_is_contiguous(b));
  4193. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  4194. bool is_node = false;
  4195. if (a->grad || b->grad) {
  4196. GGML_ASSERT(false); // TODO: implement backward
  4197. is_node = true;
  4198. }
  4199. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);
  4200. result->op = GGML_OP_RESHAPE;
  4201. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4202. result->src0 = a;
  4203. result->src1 = NULL;
  4204. return result;
  4205. }
  4206. struct ggml_tensor * ggml_reshape_2d(
  4207. struct ggml_context * ctx,
  4208. struct ggml_tensor * a,
  4209. int64_t ne0,
  4210. int64_t ne1) {
  4211. GGML_ASSERT(ggml_is_contiguous(a));
  4212. GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
  4213. bool is_node = false;
  4214. if (a->grad) {
  4215. GGML_ASSERT(false); // TODO: implement backward
  4216. is_node = true;
  4217. }
  4218. const int64_t ne[2] = { ne0, ne1 };
  4219. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
  4220. result->op = GGML_OP_RESHAPE;
  4221. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4222. result->src0 = a;
  4223. result->src1 = NULL;
  4224. return result;
  4225. }
  4226. struct ggml_tensor * ggml_reshape_3d(
  4227. struct ggml_context * ctx,
  4228. struct ggml_tensor * a,
  4229. int64_t ne0,
  4230. int64_t ne1,
  4231. int64_t ne2) {
  4232. GGML_ASSERT(ggml_is_contiguous(a));
  4233. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
  4234. bool is_node = false;
  4235. if (a->grad) {
  4236. GGML_ASSERT(false); // TODO: implement backward
  4237. is_node = true;
  4238. }
  4239. const int64_t ne[3] = { ne0, ne1, ne2 };
  4240. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
  4241. result->op = GGML_OP_RESHAPE;
  4242. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4243. result->src0 = a;
  4244. result->src1 = NULL;
  4245. return result;
  4246. }
  4247. // ggml_view_1d
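//
// the view ops below alias a->data at a byte offset; no data is copied and gradient
// propagation through views is not supported (asserted below).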
  4248. struct ggml_tensor * ggml_view_1d(
  4249. struct ggml_context * ctx,
  4250. struct ggml_tensor * a,
  4251. int64_t ne0,
  4252. size_t offset) {
  4253. if (a->grad) {
  4254. GGML_ASSERT(false); // gradient propagation is not supported
  4255. }
  4256. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
  4257. result->op = GGML_OP_VIEW;
  4258. result->grad = NULL;
  4259. result->src0 = a;
  4260. result->src1 = NULL; // TODO: maybe store the offset here?
  4261. return result;
  4262. }
  4263. // ggml_view_2d
  4264. struct ggml_tensor * ggml_view_2d(
  4265. struct ggml_context * ctx,
  4266. struct ggml_tensor * a,
  4267. int64_t ne0,
  4268. int64_t ne1,
  4269. size_t nb1,
  4270. size_t offset) {
  4271. if (a->grad) {
  4272. GGML_ASSERT(false); // gradient propagation is not supported
  4273. }
  4274. const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
  4275. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);
  4276. result->nb[1] = nb1;
  4277. result->nb[2] = result->nb[1]*ne1;
  4278. result->nb[3] = result->nb[2];
  4279. result->op = GGML_OP_VIEW;
  4280. result->grad = NULL;
  4281. result->src0 = a;
  4282. result->src1 = NULL; // TODO: maybe store the offset here?
  4283. return result;
  4284. }
  4285. // ggml_view_3d
  4286. struct ggml_tensor * ggml_view_3d(
  4287. struct ggml_context * ctx,
  4288. struct ggml_tensor * a,
  4289. int64_t ne0,
  4290. int64_t ne1,
  4291. int64_t ne2,
  4292. size_t nb1,
  4293. size_t nb2,
  4294. size_t offset) {
  4295. if (a->grad) {
  4296. GGML_ASSERT(false); // gradient propagation is not supported
  4297. }
  4298. const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };
  4299. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset);
  4300. result->nb[1] = nb1;
  4301. result->nb[2] = nb2;
  4302. result->nb[3] = result->nb[2]*ne2;
  4303. result->op = GGML_OP_VIEW;
  4304. result->grad = NULL;
  4305. result->src0 = a;
  4306. result->src1 = NULL; // TODO: maybe store the offset here?
  4307. return result;
  4308. }
  4309. // ggml_permute
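//
// permute only rearranges the ne/nb metadata - the data itself is not moved, so the
// result is generally non-contiguous. illustrative sketch (x and y are hypothetical
// tensors), e.g. to swap the first two axes and materialize the result:
//
//   struct ggml_tensor * y = ggml_cont(ctx, ggml_permute(ctx, x, 1, 0, 2, 3));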
  4310. struct ggml_tensor * ggml_permute(
  4311. struct ggml_context * ctx,
  4312. struct ggml_tensor * a,
  4313. int axis0,
  4314. int axis1,
  4315. int axis2,
  4316. int axis3) {
  4317. GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
  4318. GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
  4319. GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
  4320. GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
  4321. GGML_ASSERT(axis0 != axis1);
  4322. GGML_ASSERT(axis0 != axis2);
  4323. GGML_ASSERT(axis0 != axis3);
  4324. GGML_ASSERT(axis1 != axis2);
  4325. GGML_ASSERT(axis1 != axis3);
  4326. GGML_ASSERT(axis2 != axis3);
  4327. bool is_node = false;
  4328. if (a->grad) {
  4329. GGML_ASSERT(false); // TODO: implement backward
  4330. is_node = true;
  4331. }
  4332. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
// use 64-bit local copies so large dims/strides are not truncated
int64_t ne[GGML_MAX_DIMS];
size_t  nb[GGML_MAX_DIMS];
  4335. ne[axis0] = a->ne[0];
  4336. ne[axis1] = a->ne[1];
  4337. ne[axis2] = a->ne[2];
  4338. ne[axis3] = a->ne[3];
  4339. nb[axis0] = a->nb[0];
  4340. nb[axis1] = a->nb[1];
  4341. nb[axis2] = a->nb[2];
  4342. nb[axis3] = a->nb[3];
  4343. result->ne[0] = ne[0];
  4344. result->ne[1] = ne[1];
  4345. result->ne[2] = ne[2];
  4346. result->ne[3] = ne[3];
  4347. result->nb[0] = nb[0];
  4348. result->nb[1] = nb[1];
  4349. result->nb[2] = nb[2];
  4350. result->nb[3] = nb[3];
  4351. result->op = GGML_OP_PERMUTE;
  4352. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4353. result->src0 = a;
  4354. result->src1 = NULL; // TODO: maybe store the permutation here?
  4355. return result;
  4356. }
  4357. // ggml_transpose
  4358. struct ggml_tensor * ggml_transpose(
  4359. struct ggml_context * ctx,
  4360. struct ggml_tensor * a) {
  4361. bool is_node = false;
  4362. if (a->grad) {
  4363. GGML_ASSERT(false); // TODO: implement backward
  4364. is_node = true;
  4365. }
  4366. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4367. result->ne[0] = a->ne[1];
  4368. result->ne[1] = a->ne[0];
  4369. result->nb[0] = a->nb[1];
  4370. result->nb[1] = a->nb[0];
  4371. result->op = GGML_OP_TRANSPOSE;
  4372. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4373. result->src0 = a;
  4374. result->src1 = NULL;
  4375. return result;
  4376. }
  4377. // ggml_get_rows
  4378. struct ggml_tensor * ggml_get_rows(
  4379. struct ggml_context * ctx,
  4380. struct ggml_tensor * a,
  4381. struct ggml_tensor * b) {
  4382. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  4383. bool is_node = false;
  4384. if (a->grad || b->grad) {
  4385. GGML_ASSERT(false); // TODO: implement backward
  4386. is_node = true;
  4387. }
  4388. // TODO: implement non F32 return
  4389. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  4390. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);
  4391. result->op = GGML_OP_GET_ROWS;
  4392. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4393. result->src0 = a;
  4394. result->src1 = b;
  4395. return result;
  4396. }
  4397. // ggml_diag_mask_inf
  4398. struct ggml_tensor * ggml_diag_mask_inf(
  4399. struct ggml_context * ctx,
  4400. struct ggml_tensor * a,
  4401. int n_past) {
  4402. bool is_node = false;
  4403. if (a->grad) {
  4404. GGML_ASSERT(false); // TODO: implement backward
  4405. is_node = true;
  4406. }
// TODO: when backward is implemented, fix this:
  4408. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4409. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4410. struct ggml_tensor * b = ggml_new_i32(ctx, n_past);
  4411. result->op = GGML_OP_DIAG_MASK_INF;
  4412. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4413. result->src0 = a;
  4414. result->src1 = b;
  4415. return result;
  4416. }
  4417. // ggml_soft_max
  4418. struct ggml_tensor * ggml_soft_max(
  4419. struct ggml_context * ctx,
  4420. struct ggml_tensor * a) {
  4421. bool is_node = false;
  4422. if (a->grad) {
  4423. GGML_ASSERT(false); // TODO: implement backward
  4424. is_node = true;
  4425. }
// TODO: when backward is implemented, fix this:
  4427. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4428. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4429. result->op = GGML_OP_SOFT_MAX;
  4430. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4431. result->src0 = a;
  4432. result->src1 = NULL;
  4433. return result;
  4434. }
  4435. // ggml_rope
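//
// n_past, n_dims and mode are packed into a 3-element I32 tensor carried as src1;
// the result is a view of a, so rope is effectively applied in place at compute time.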
  4436. struct ggml_tensor * ggml_rope(
  4437. struct ggml_context * ctx,
  4438. struct ggml_tensor * a,
  4439. int n_past,
  4440. int n_dims,
  4441. int mode) {
  4442. GGML_ASSERT(n_past >= 0);
  4443. bool is_node = false;
  4444. if (a->grad) {
  4445. GGML_ASSERT(false); // TODO: implement backward
  4446. is_node = true;
  4447. }
// TODO: when backward is implemented, fix this:
  4449. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4450. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4451. struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
  4452. ((int32_t *) b->data)[0] = n_past;
  4453. ((int32_t *) b->data)[1] = n_dims;
  4454. ((int32_t *) b->data)[2] = mode;
  4455. result->op = GGML_OP_ROPE;
  4456. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4457. result->src0 = a;
  4458. result->src1 = b;
  4459. return result;
  4460. }
  4461. // ggml_conv_1d_1s
  4462. struct ggml_tensor * ggml_conv_1d_1s(
  4463. struct ggml_context * ctx,
  4464. struct ggml_tensor * a,
  4465. struct ggml_tensor * b) {
  4466. GGML_ASSERT(ggml_is_matrix(b));
  4467. GGML_ASSERT(a->ne[1] == b->ne[1]);
  4468. GGML_ASSERT(a->ne[3] == 1);
  4469. bool is_node = false;
  4470. if (a->grad || b->grad) {
  4471. GGML_ASSERT(false); // TODO: implement backward
  4472. is_node = true;
  4473. }
  4474. const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, };
  4475. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  4476. result->op = GGML_OP_CONV_1D_1S;
  4477. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4478. result->src0 = a;
  4479. result->src1 = b;
  4480. return result;
  4481. }
  4482. // ggml_conv_1d_2s
  4483. struct ggml_tensor * ggml_conv_1d_2s(
  4484. struct ggml_context * ctx,
  4485. struct ggml_tensor * a,
  4486. struct ggml_tensor * b) {
  4487. GGML_ASSERT(ggml_is_matrix(b));
  4488. GGML_ASSERT(a->ne[1] == b->ne[1]);
  4489. GGML_ASSERT(a->ne[3] == 1);
  4490. bool is_node = false;
  4491. if (a->grad || b->grad) {
  4492. GGML_ASSERT(false); // TODO: implement backward
  4493. is_node = true;
  4494. }
  4495. const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, };
  4496. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  4497. result->op = GGML_OP_CONV_1D_2S;
  4498. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4499. result->src0 = a;
  4500. result->src1 = b;
  4501. return result;
  4502. }
  4503. // ggml_flash_attn
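//
// the result is a fresh F32 tensor with q's shape; v and the masked flag are carried
// through opt[0] and opt[1].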
  4504. struct ggml_tensor * ggml_flash_attn(
  4505. struct ggml_context * ctx,
  4506. struct ggml_tensor * q,
  4507. struct ggml_tensor * k,
  4508. struct ggml_tensor * v,
  4509. bool masked) {
  4510. GGML_ASSERT(ggml_can_mul_mat(k, q));
  4511. // TODO: check if vT can be multiplied by (k*qT)
  4512. bool is_node = false;
  4513. if (q->grad || k->grad || v->grad) {
  4514. GGML_ASSERT(false); // TODO: implement backward
  4515. is_node = true;
  4516. }
  4517. //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
  4518. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, q->ne);
  4519. result->op = GGML_OP_FLASH_ATTN;
  4520. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4521. result->src0 = q;
  4522. result->src1 = k;
  4523. result->opt[0] = v;
  4524. result->opt[1] = ggml_new_i32(ctx, masked ? 1 : 0);
  4525. return result;
  4526. }
  4527. // ggml_flash_ff
  4528. struct ggml_tensor * ggml_flash_ff(
  4529. struct ggml_context * ctx,
  4530. struct ggml_tensor * a,
  4531. struct ggml_tensor * b0,
  4532. struct ggml_tensor * b1,
  4533. struct ggml_tensor * c0,
  4534. struct ggml_tensor * c1) {
  4535. GGML_ASSERT(ggml_can_mul_mat(b0, a));
  4536. // TODO: more checks
  4537. bool is_node = false;
  4538. if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
  4539. GGML_ASSERT(false); // TODO: implement backward
  4540. is_node = true;
  4541. }
  4542. //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4543. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, a->ne);
  4544. result->op = GGML_OP_FLASH_FF;
  4545. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4546. result->src0 = a;
  4547. result->src1 = b0;
  4548. result->opt[0] = b1;
  4549. result->opt[1] = c0;
  4550. result->opt[2] = c1;
  4551. return result;
  4552. }
  4553. // ggml_map_unary
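//
// the user callback is stored as a raw function pointer inside a small I32 tensor
// (addr_tensor) carried in opt[0] and read back by the forward kernel. minimal usage
// sketch, assuming the (n, dst, src) callback signature of ggml_unary_op_f32_t from
// ggml.h - my_op and x are hypothetical:
//
//   static void my_op(const int n, float * dst, const float * src) {
//       for (int i = 0; i < n; ++i) dst[i] = src[i] > 0.0f ? src[i] : 0.0f;
//   }
//   struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_op);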
  4554. struct ggml_tensor * ggml_map_unary_impl_f32(
  4555. struct ggml_context * ctx,
  4556. struct ggml_tensor * a,
  4557. const ggml_unary_op_f32_t fun,
  4558. bool inplace) {
  4559. bool is_node = false;
  4560. if (!inplace && a->grad) {
  4561. is_node = true;
  4562. }
  4563. struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
  4564. *((void (**)(void))addr_tensor->data) = (void (*)(void))fun;
  4565. struct ggml_tensor *result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4566. result->op = GGML_OP_MAP_UNARY;
  4567. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4568. result->src0 = a;
  4569. result->opt[0] = addr_tensor;
  4570. return result;
  4571. }
  4572. struct ggml_tensor * ggml_map_unary_f32(
  4573. struct ggml_context * ctx,
  4574. struct ggml_tensor * a,
  4575. const ggml_unary_op_f32_t fun) {
  4576. return ggml_map_unary_impl_f32(ctx, a, fun, false);
  4577. }
  4578. struct ggml_tensor * ggml_map_unary_inplace_f32(
  4579. struct ggml_context * ctx,
  4580. struct ggml_tensor * a,
  4581. const ggml_unary_op_f32_t fun) {
  4582. return ggml_map_unary_impl_f32(ctx, a, fun, true);
  4583. }
  4584. // ggml_map_binary
  4585. struct ggml_tensor * ggml_map_binary_impl_f32(
  4586. struct ggml_context * ctx,
  4587. struct ggml_tensor * a,
  4588. struct ggml_tensor * b,
  4589. const ggml_binary_op_f32_t fun,
  4590. bool inplace) {
  4591. GGML_ASSERT(ggml_are_same_shape(a, b));
  4592. bool is_node = false;
  4593. if (!inplace && (a->grad || b->grad)) {
  4594. is_node = true;
  4595. }
  4596. struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
  4597. *((void (**)(void))addr_tensor->data) = (void (*)(void))fun;
  4598. struct ggml_tensor *result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4599. result->op = GGML_OP_MAP_BINARY;
  4600. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4601. result->src0 = a;
  4602. result->src1 = b;
  4603. result->opt[0] = addr_tensor;
  4604. return result;
  4605. }
  4606. struct ggml_tensor * ggml_map_binary_f32(
  4607. struct ggml_context * ctx,
  4608. struct ggml_tensor * a,
  4609. struct ggml_tensor * b,
  4610. const ggml_binary_op_f32_t fun) {
  4611. return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
  4612. }
  4613. struct ggml_tensor * ggml_map_binary_inplace_f32(
  4614. struct ggml_context * ctx,
  4615. struct ggml_tensor * a,
  4616. struct ggml_tensor * b,
  4617. const ggml_binary_op_f32_t fun) {
  4618. return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
  4619. }
  4620. ////////////////////////////////////////////////////////////////////////////////
  4621. void ggml_set_param(
  4622. struct ggml_context * ctx,
  4623. struct ggml_tensor * tensor) {
  4624. tensor->is_param = true;
  4625. GGML_ASSERT(tensor->grad == NULL);
  4626. tensor->grad = ggml_dup_tensor(ctx, tensor);
  4627. }
  4628. // ggml_compute_forward_dup
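//
// three paths, from fastest to slowest:
//   1. same type, both tensors contiguous    -> single memcpy, split across threads by elements
//   2. same type, rows laid out identically  -> per-row memcpy
//   3. everything else                        -> element-wise copy/conversion (with per-row
//      quantization into a per-thread scratch buffer when dst is quantized)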
  4629. static void ggml_compute_forward_dup_f16(
  4630. const struct ggml_compute_params * params,
  4631. const struct ggml_tensor * src0,
  4632. struct ggml_tensor * dst) {
  4633. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  4634. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  4635. return;
  4636. }
  4637. const int64_t ne00 = src0->ne[0];
  4638. const int64_t ne01 = src0->ne[1];
  4639. const int64_t ne02 = src0->ne[2];
  4640. const int64_t ne03 = src0->ne[3];
  4641. const int64_t ne0 = dst->ne[0];
  4642. const int64_t ne1 = dst->ne[1];
  4643. const int64_t ne2 = dst->ne[2];
  4644. const int64_t ne3 = dst->ne[3];
  4645. const size_t nb00 = src0->nb[0];
  4646. const size_t nb01 = src0->nb[1];
  4647. const size_t nb02 = src0->nb[2];
  4648. const size_t nb03 = src0->nb[3];
  4649. const size_t nb0 = dst->nb[0];
  4650. const size_t nb1 = dst->nb[1];
  4651. const size_t nb2 = dst->nb[2];
  4652. const size_t nb3 = dst->nb[3];
  4653. const int ith = params->ith; // thread index
  4654. const int nth = params->nth; // number of threads
  4655. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  4656. // parallelize by elements
  4657. const int ne = ggml_nelements(dst);
  4658. const int dr = (ne + nth - 1) / nth;
  4659. const int ie0 = dr * ith;
  4660. const int ie1 = MIN(ie0 + dr, ne);
  4661. memcpy(
  4662. ((char *) dst->data + ie0*nb0),
  4663. ((char *) src0->data + ie0*nb00),
  4664. (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
  4665. return;
  4666. }
  4667. // parallelize by rows
  4668. const int nr = ne01;
  4669. // number of rows per thread
  4670. const int dr = (nr + nth - 1) / nth;
  4671. // row range for this thread
  4672. const int ir0 = dr * ith;
  4673. const int ir1 = MIN(ir0 + dr, nr);
  4674. if (src0->type == dst->type &&
  4675. ne00 == ne0 &&
  4676. nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) {
  4677. // copy by rows
  4678. const size_t rs = ne00*nb00;
  4679. for (int64_t i03 = 0; i03 < ne03; i03++) {
  4680. for (int64_t i02 = 0; i02 < ne02; i02++) {
  4681. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  4682. memcpy(
  4683. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  4684. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  4685. rs);
  4686. }
  4687. }
  4688. }
  4689. return;
  4690. }
  4691. // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
  4692. if (ggml_is_contiguous(dst)) {
  4693. if (nb00 == sizeof(ggml_fp16_t)) {
  4694. if (dst->type == GGML_TYPE_F16) {
  4695. size_t id = 0;
  4696. const size_t rs = ne00 * nb00;
  4697. char * dst_ptr = (char *) dst->data;
  4698. for (int i03 = 0; i03 < ne03; i03++) {
  4699. for (int i02 = 0; i02 < ne02; i02++) {
  4700. id += rs * ir0;
  4701. for (int i01 = ir0; i01 < ir1; i01++) {
  4702. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  4703. memcpy(dst_ptr + id, src0_ptr, rs);
  4704. id += rs;
  4705. }
  4706. id += rs * (ne01 - ir1);
  4707. }
  4708. }
  4709. } else if (dst->type == GGML_TYPE_F32) {
  4710. size_t id = 0;
  4711. float * dst_ptr = (float *) dst->data;
  4712. for (int i03 = 0; i03 < ne03; i03++) {
  4713. for (int i02 = 0; i02 < ne02; i02++) {
  4714. id += ne00 * ir0;
  4715. for (int i01 = ir0; i01 < ir1; i01++) {
  4716. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  4717. for (int i00 = 0; i00 < ne00; i00++) {
  4718. dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  4719. id++;
  4720. }
  4721. }
  4722. id += ne00 * (ne01 - ir1);
  4723. }
  4724. }
  4725. } else if (ggml_is_quantized(dst->type)) {
  4726. quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q;
  4727. float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  4728. size_t id = 0;
  4729. size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
  4730. char * dst_ptr = (char *) dst->data;
  4731. for (int i03 = 0; i03 < ne03; i03++) {
  4732. for (int i02 = 0; i02 < ne02; i02++) {
  4733. id += rs * ir0;
  4734. for (int i01 = ir0; i01 < ir1; i01++) {
  4735. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  4736. for (int i00 = 0; i00 < ne00; i00++) {
  4737. src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  4738. }
  4739. quantize_row_q(src0_f32, dst_ptr + id, ne00);
  4740. id += rs;
  4741. }
  4742. id += rs * (ne01 - ir1);
  4743. }
  4744. }
  4745. } else {
  4746. GGML_ASSERT(false); // TODO: implement
  4747. }
  4748. } else {
  4749. //printf("%s: this is not optimal - fix me\n", __func__);
  4750. if (dst->type == GGML_TYPE_F32) {
  4751. size_t id = 0;
  4752. float * dst_ptr = (float *) dst->data;
  4753. for (int i03 = 0; i03 < ne03; i03++) {
  4754. for (int i02 = 0; i02 < ne02; i02++) {
  4755. id += ne00 * ir0;
  4756. for (int i01 = ir0; i01 < ir1; i01++) {
  4757. for (int i00 = 0; i00 < ne00; i00++) {
  4758. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  4759. dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
  4760. id++;
  4761. }
  4762. }
  4763. id += ne00 * (ne01 - ir1);
  4764. }
  4765. }
  4766. } else if (dst->type == GGML_TYPE_F16) {
  4767. size_t id = 0;
  4768. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  4769. for (int i03 = 0; i03 < ne03; i03++) {
  4770. for (int i02 = 0; i02 < ne02; i02++) {
  4771. id += ne00 * ir0;
  4772. for (int i01 = ir0; i01 < ir1; i01++) {
  4773. for (int i00 = 0; i00 < ne00; i00++) {
  4774. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  4775. dst_ptr[id] = *src0_ptr;
  4776. id++;
  4777. }
  4778. }
  4779. id += ne00 * (ne01 - ir1);
  4780. }
  4781. }
  4782. } else {
  4783. GGML_ASSERT(false); // TODO: implement
  4784. }
  4785. }
  4786. return;
  4787. }
  4788. // dst counters
  4789. int64_t i10 = 0;
  4790. int64_t i11 = 0;
  4791. int64_t i12 = 0;
  4792. int64_t i13 = 0;
  4793. if (dst->type == GGML_TYPE_F16) {
  4794. for (int64_t i03 = 0; i03 < ne03; i03++) {
  4795. for (int64_t i02 = 0; i02 < ne02; i02++) {
  4796. i10 += ne00 * ir0;
  4797. while (i10 >= ne0) {
  4798. i10 -= ne0;
  4799. if (++i11 == ne1) {
  4800. i11 = 0;
  4801. if (++i12 == ne2) {
  4802. i12 = 0;
  4803. if (++i13 == ne3) {
  4804. i13 = 0;
  4805. }
  4806. }
  4807. }
  4808. }
  4809. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  4810. for (int64_t i00 = 0; i00 < ne00; i00++) {
  4811. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  4812. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  4813. memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));
// wrap the dst counters on the dst dims (ne0..ne3), not the src dims
if (++i10 == ne0) {
i10 = 0;
if (++i11 == ne1) {
i11 = 0;
if (++i12 == ne2) {
i12 = 0;
if (++i13 == ne3) {
  4821. i13 = 0;
  4822. }
  4823. }
  4824. }
  4825. }
  4826. }
  4827. }
  4828. i10 += ne00 * (ne01 - ir1);
  4829. while (i10 >= ne0) {
  4830. i10 -= ne0;
  4831. if (++i11 == ne1) {
  4832. i11 = 0;
  4833. if (++i12 == ne2) {
  4834. i12 = 0;
  4835. if (++i13 == ne3) {
  4836. i13 = 0;
  4837. }
  4838. }
  4839. }
  4840. }
  4841. }
  4842. }
  4843. } else if (dst->type == GGML_TYPE_F32) {
  4844. for (int64_t i03 = 0; i03 < ne03; i03++) {
  4845. for (int64_t i02 = 0; i02 < ne02; i02++) {
  4846. i10 += ne00 * ir0;
  4847. while (i10 >= ne0) {
  4848. i10 -= ne0;
  4849. if (++i11 == ne1) {
  4850. i11 = 0;
  4851. if (++i12 == ne2) {
  4852. i12 = 0;
  4853. if (++i13 == ne3) {
  4854. i13 = 0;
  4855. }
  4856. }
  4857. }
  4858. }
  4859. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  4860. for (int64_t i00 = 0; i00 < ne00; i00++) {
  4861. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  4862. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  4863. *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);
  4864. if (++i10 == ne0) {
  4865. i10 = 0;
  4866. if (++i11 == ne1) {
  4867. i11 = 0;
  4868. if (++i12 == ne2) {
  4869. i12 = 0;
  4870. if (++i13 == ne3) {
  4871. i13 = 0;
  4872. }
  4873. }
  4874. }
  4875. }
  4876. }
  4877. }
  4878. i10 += ne00 * (ne01 - ir1);
  4879. while (i10 >= ne0) {
  4880. i10 -= ne0;
  4881. if (++i11 == ne1) {
  4882. i11 = 0;
  4883. if (++i12 == ne2) {
  4884. i12 = 0;
  4885. if (++i13 == ne3) {
  4886. i13 = 0;
  4887. }
  4888. }
  4889. }
  4890. }
  4891. }
  4892. }
  4893. } else {
  4894. GGML_ASSERT(false); // TODO: implement
  4895. }
  4896. }
  4897. static void ggml_compute_forward_dup_f32(
  4898. const struct ggml_compute_params * params,
  4899. const struct ggml_tensor * src0,
  4900. struct ggml_tensor * dst) {
  4901. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  4902. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  4903. return;
  4904. }
  4905. const int64_t ne00 = src0->ne[0];
  4906. const int64_t ne01 = src0->ne[1];
  4907. const int64_t ne02 = src0->ne[2];
  4908. const int64_t ne03 = src0->ne[3];
  4909. const int64_t ne0 = dst->ne[0];
  4910. const int64_t ne1 = dst->ne[1];
  4911. const int64_t ne2 = dst->ne[2];
  4912. const int64_t ne3 = dst->ne[3];
  4913. const size_t nb00 = src0->nb[0];
  4914. const size_t nb01 = src0->nb[1];
  4915. const size_t nb02 = src0->nb[2];
  4916. const size_t nb03 = src0->nb[3];
  4917. const size_t nb0 = dst->nb[0];
  4918. const size_t nb1 = dst->nb[1];
  4919. const size_t nb2 = dst->nb[2];
  4920. const size_t nb3 = dst->nb[3];
  4921. const int ith = params->ith; // thread index
  4922. const int nth = params->nth; // number of threads
  4923. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  4924. // parallelize by elements
  4925. const int ne = ggml_nelements(dst);
  4926. const int dr = (ne + nth - 1) / nth;
  4927. const int ie0 = dr * ith;
  4928. const int ie1 = MIN(ie0 + dr, ne);
  4929. memcpy(
  4930. ((char *) dst->data + ie0*nb0),
  4931. ((char *) src0->data + ie0*nb00),
  4932. (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
  4933. return;
  4934. }
  4935. // parallelize by rows
  4936. const int nr = ne01;
  4937. // number of rows per thread
  4938. const int dr = (nr + nth - 1) / nth;
  4939. // row range for this thread
  4940. const int ir0 = dr * ith;
  4941. const int ir1 = MIN(ir0 + dr, nr);
  4942. if (src0->type == dst->type &&
  4943. ne00 == ne0 &&
  4944. nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) {
  4945. // copy by rows
  4946. const size_t rs = ne00*nb00;
  4947. for (int64_t i03 = 0; i03 < ne03; i03++) {
  4948. for (int64_t i02 = 0; i02 < ne02; i02++) {
  4949. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  4950. memcpy(
  4951. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  4952. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  4953. rs);
  4954. }
  4955. }
  4956. }
  4957. return;
  4958. }
  4959. if (ggml_is_contiguous(dst)) {
  4960. // TODO: simplify
  4961. if (nb00 == sizeof(float)) {
  4962. if (dst->type == GGML_TYPE_F32) {
  4963. size_t id = 0;
  4964. const size_t rs = ne00 * nb00;
  4965. char * dst_ptr = (char *) dst->data;
  4966. for (int i03 = 0; i03 < ne03; i03++) {
  4967. for (int i02 = 0; i02 < ne02; i02++) {
  4968. id += rs * ir0;
  4969. for (int i01 = ir0; i01 < ir1; i01++) {
  4970. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  4971. memcpy(dst_ptr + id, src0_ptr, rs);
  4972. id += rs;
  4973. }
  4974. id += rs * (ne01 - ir1);
  4975. }
  4976. }
  4977. } else if (dst->type == GGML_TYPE_F16) {
  4978. size_t id = 0;
  4979. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  4980. for (int i03 = 0; i03 < ne03; i03++) {
  4981. for (int i02 = 0; i02 < ne02; i02++) {
  4982. id += ne00 * ir0;
  4983. for (int i01 = ir0; i01 < ir1; i01++) {
  4984. for (int i00 = 0; i00 < ne00; i00++) {
  4985. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  4986. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  4987. id++;
  4988. }
  4989. }
  4990. id += ne00 * (ne01 - ir1);
  4991. }
  4992. }
  4993. } else if (ggml_is_quantized(dst->type)) {
  4994. quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q;
  4995. size_t id = 0;
  4996. size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
  4997. char * dst_ptr = (char *) dst->data;
  4998. for (int i03 = 0; i03 < ne03; i03++) {
  4999. for (int i02 = 0; i02 < ne02; i02++) {
  5000. id += rs * ir0;
  5001. for (int i01 = ir0; i01 < ir1; i01++) {
  5002. const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5003. quantize_row_q(src0_ptr, dst_ptr + id, ne00);
  5004. id += rs;
  5005. }
  5006. id += rs * (ne01 - ir1);
  5007. }
  5008. }
  5009. } else {
  5010. GGML_ASSERT(false); // TODO: implement
  5011. }
  5012. } else {
  5013. //printf("%s: this is not optimal - fix me\n", __func__);
  5014. if (dst->type == GGML_TYPE_F32) {
  5015. size_t id = 0;
  5016. float * dst_ptr = (float *) dst->data;
  5017. for (int i03 = 0; i03 < ne03; i03++) {
  5018. for (int i02 = 0; i02 < ne02; i02++) {
  5019. id += ne00 * ir0;
  5020. for (int i01 = ir0; i01 < ir1; i01++) {
  5021. for (int i00 = 0; i00 < ne00; i00++) {
  5022. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5023. dst_ptr[id] = *src0_ptr;
  5024. id++;
  5025. }
  5026. }
  5027. id += ne00 * (ne01 - ir1);
  5028. }
  5029. }
  5030. } else if (dst->type == GGML_TYPE_F16) {
  5031. size_t id = 0;
  5032. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  5033. for (int i03 = 0; i03 < ne03; i03++) {
  5034. for (int i02 = 0; i02 < ne02; i02++) {
  5035. id += ne00 * ir0;
  5036. for (int i01 = ir0; i01 < ir1; i01++) {
  5037. for (int i00 = 0; i00 < ne00; i00++) {
  5038. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5039. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  5040. id++;
  5041. }
  5042. }
  5043. id += ne00 * (ne01 - ir1);
  5044. }
  5045. }
  5046. } else {
  5047. GGML_ASSERT(false); // TODO: implement
  5048. }
  5049. }
  5050. return;
  5051. }
  5052. // dst counters
  5053. int64_t i10 = 0;
  5054. int64_t i11 = 0;
  5055. int64_t i12 = 0;
  5056. int64_t i13 = 0;
  5057. if (dst->type == GGML_TYPE_F32) {
  5058. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5059. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5060. i10 += ne00 * ir0;
  5061. while (i10 >= ne0) {
  5062. i10 -= ne0;
if (++i11 == ne1) {
  5065. i11 = 0;
  5066. if (++i12 == ne2) {
  5067. i12 = 0;
  5068. if (++i13 == ne3) {
  5069. i13 = 0;
  5070. }
  5071. }
  5072. }
  5073. }
  5074. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5075. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5076. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5077. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5078. memcpy(dst_ptr, src0_ptr, sizeof(float));
  5079. if (++i10 == ne0) {
  5080. i10 = 0;
  5081. if (++i11 == ne1) {
  5082. i11 = 0;
  5083. if (++i12 == ne2) {
  5084. i12 = 0;
  5085. if (++i13 == ne3) {
  5086. i13 = 0;
  5087. }
  5088. }
  5089. }
  5090. }
  5091. }
  5092. }
  5093. i10 += ne00 * (ne01 - ir1);
  5094. while (i10 >= ne0) {
  5095. i10 -= ne0;
  5096. if (++i11 == ne1) {
  5097. i11 = 0;
  5098. if (++i12 == ne2) {
  5099. i12 = 0;
  5100. if (++i13 == ne3) {
  5101. i13 = 0;
  5102. }
  5103. }
  5104. }
  5105. }
  5106. }
  5107. }
  5108. } else if (dst->type == GGML_TYPE_F16) {
  5109. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5110. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5111. i10 += ne00 * ir0;
  5112. while (i10 >= ne0) {
  5113. i10 -= ne0;
  5114. if (++i11 == ne1) {
  5115. i11 = 0;
  5116. if (++i12 == ne2) {
  5117. i12 = 0;
  5118. if (++i13 == ne3) {
  5119. i13 = 0;
  5120. }
  5121. }
  5122. }
  5123. }
  5124. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5125. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5126. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5127. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5128. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);
  5129. if (++i10 == ne0) {
  5130. i10 = 0;
  5131. if (++i11 == ne1) {
  5132. i11 = 0;
  5133. if (++i12 == ne2) {
  5134. i12 = 0;
  5135. if (++i13 == ne3) {
  5136. i13 = 0;
  5137. }
  5138. }
  5139. }
  5140. }
  5141. }
  5142. }
  5143. i10 += ne00 * (ne01 - ir1);
  5144. while (i10 >= ne0) {
  5145. i10 -= ne0;
  5146. if (++i11 == ne1) {
  5147. i11 = 0;
  5148. if (++i12 == ne2) {
  5149. i12 = 0;
  5150. if (++i13 == ne3) {
  5151. i13 = 0;
  5152. }
  5153. }
  5154. }
  5155. }
  5156. }
  5157. }
  5158. } else {
  5159. GGML_ASSERT(false); // TODO: implement
  5160. }
  5161. }
  5162. static void ggml_compute_forward_dup(
  5163. const struct ggml_compute_params * params,
  5164. const struct ggml_tensor * src0,
  5165. struct ggml_tensor * dst) {
  5166. switch (src0->type) {
  5167. case GGML_TYPE_F16:
  5168. {
  5169. ggml_compute_forward_dup_f16(params, src0, dst);
  5170. } break;
  5171. case GGML_TYPE_F32:
  5172. {
  5173. ggml_compute_forward_dup_f32(params, src0, dst);
  5174. } break;
  5175. default:
  5176. {
  5177. GGML_ASSERT(false);
  5178. } break;
  5179. }
  5180. }
  5181. // ggml_compute_forward_add
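//
// rows are interleaved across threads (row j is handled by thread j % nth); when
// GGML_USE_ACCELERATE is defined the contiguous f32 case goes through vDSP_vadd.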
  5182. static void ggml_compute_forward_add_f32(
  5183. const struct ggml_compute_params * params,
  5184. const struct ggml_tensor * src0,
  5185. const struct ggml_tensor * src1,
  5186. struct ggml_tensor * dst) {
  5187. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5188. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5189. return;
  5190. }
  5191. const int ith = params->ith;
  5192. const int nth = params->nth;
  5193. const int n = ggml_nrows(src0);
  5194. const int nc = src0->ne[0];
  5195. const size_t nb00 = src0->nb[0];
  5196. const size_t nb01 = src0->nb[1];
  5197. const size_t nb10 = src1->nb[0];
  5198. const size_t nb11 = src1->nb[1];
  5199. const size_t nb0 = dst->nb[0];
  5200. const size_t nb1 = dst->nb[1];
  5201. GGML_ASSERT( nb0 == sizeof(float));
  5202. GGML_ASSERT(nb00 == sizeof(float));
  5203. if (nb10 == sizeof(float)) {
  5204. for (int j = ith; j < n; j += nth) {
  5205. #ifdef GGML_USE_ACCELERATE
  5206. vDSP_vadd(
  5207. (float *) ((char *) src0->data + j*nb01), 1,
  5208. (float *) ((char *) src1->data + j*nb11), 1,
  5209. (float *) ((char *) dst->data + j*nb1), 1, nc);
  5210. #else
  5211. ggml_vec_add_f32(nc,
  5212. (float *) ((char *) dst->data + j*nb1),
  5213. (float *) ((char *) src0->data + j*nb01),
  5214. (float *) ((char *) src1->data + j*nb11));
  5215. #endif
  5216. }
  5217. } else {
  5218. // src1 is not contiguous
  5219. for (int j = ith; j < n; j += nth) {
  5220. float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
  5221. float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
  5222. for (int i = 0; i < nc; i++) {
  5223. float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10);
  5224. dst_ptr[i] = src0_ptr[i] + *src1_ptr;
  5225. }
  5226. }
  5227. }
  5228. }
  5229. static void ggml_compute_forward_add_f16_f32(
  5230. const struct ggml_compute_params * params,
  5231. const struct ggml_tensor * src0,
  5232. const struct ggml_tensor * src1,
  5233. struct ggml_tensor * dst) {
  5234. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5235. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5236. return;
  5237. }
  5238. const int ith = params->ith;
  5239. const int nth = params->nth;
  5240. const int n = ggml_nrows(src0);
  5241. const int nc = src0->ne[0];
  5242. const size_t nb00 = src0->nb[0];
  5243. const size_t nb01 = src0->nb[1];
  5244. const size_t nb10 = src1->nb[0];
  5245. const size_t nb11 = src1->nb[1];
  5246. const size_t nb0 = dst->nb[0];
  5247. const size_t nb1 = dst->nb[1];
  5248. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  5249. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  5250. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  5251. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  5252. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  5253. if (nb10 == sizeof(float)) {
  5254. for (int j = ith; j < n; j += nth) {
  5255. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1);
  5256. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01);
  5257. for (int i = 0; i < nc; i++) {
  5258. float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10);
  5259. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + *src1_ptr);
  5260. }
  5261. }
  5262. }
  5263. else {
  5264. // src1 is not contiguous
  5265. GGML_ASSERT(false);
  5266. }
  5267. }
  5268. static void ggml_compute_forward_add_f16_f16(
  5269. const struct ggml_compute_params * params,
  5270. const struct ggml_tensor * src0,
  5271. const struct ggml_tensor * src1,
  5272. struct ggml_tensor * dst) {
  5273. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5274. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5275. return;
  5276. }
  5277. const int ith = params->ith;
  5278. const int nth = params->nth;
  5279. const int n = ggml_nrows(src0);
  5280. const int nc = src0->ne[0];
  5281. const size_t nb00 = src0->nb[0];
  5282. const size_t nb01 = src0->nb[1];
  5283. const size_t nb10 = src1->nb[0];
  5284. const size_t nb11 = src1->nb[1];
  5285. const size_t nb0 = dst->nb[0];
  5286. const size_t nb1 = dst->nb[1];
  5287. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  5288. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  5289. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  5290. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  5291. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  5292. if (nb10 == sizeof(ggml_fp16_t)) {
  5293. for (int j = ith; j < n; j += nth) {
  5294. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1);
  5295. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01);
  5296. for (int i = 0; i < nc; i++) {
  5297. ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + j*nb11 + i*nb10);
  5298. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(*src1_ptr));
  5299. }
  5300. }
  5301. }
  5302. else {
  5303. // src1 is not contiguous
  5304. GGML_ASSERT(false);
  5305. }
  5306. }
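// ggml_compute_forward_add_q_f32: add an f32 tensor to a quantized tensor.
// each src0 row is dequantized into a per-thread scratch area in params->wdata,
// src1 is accumulated in f32, and the row is re-quantized into dst.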
  5307. static void ggml_compute_forward_add_q_f32(
  5308. const struct ggml_compute_params * params,
  5309. const struct ggml_tensor * src0,
  5310. const struct ggml_tensor * src1,
  5311. struct ggml_tensor * dst) {
  5312. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5313. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5314. return;
  5315. }
  5316. const int64_t ne00 = src0->ne[0];
  5317. const int64_t ne01 = src0->ne[1];
  5318. const int64_t ne02 = src0->ne[2];
  5319. const int64_t ne03 = src0->ne[3];
  5320. //const int64_t ne10 = src1->ne[0];
  5321. //const int64_t ne11 = src1->ne[1];
  5322. const int64_t ne12 = src1->ne[2];
  5323. const int64_t ne13 = src1->ne[3];
  5324. //const int64_t ne0 = dst->ne[0];
  5325. //const int64_t ne1 = dst->ne[1];
  5326. const int64_t ne2 = dst->ne[2];
  5327. const int64_t ne3 = dst->ne[3];
  5328. const int nb00 = src0->nb[0];
  5329. const int nb01 = src0->nb[1];
  5330. const int nb02 = src0->nb[2];
  5331. const int nb03 = src0->nb[3];
  5332. const int nb10 = src1->nb[0];
  5333. const int nb11 = src1->nb[1];
  5334. const int nb12 = src1->nb[2];
  5335. const int nb13 = src1->nb[3];
  5336. const int nb0 = dst->nb[0];
  5337. const int nb1 = dst->nb[1];
  5338. const int nb2 = dst->nb[2];
  5339. const int nb3 = dst->nb[3];
  5340. const int ith = params->ith;
  5341. const int nth = params->nth;
  5342. GGML_ASSERT(ne02 == ne12);
  5343. GGML_ASSERT(ne03 == ne13);
  5344. GGML_ASSERT(ne2 == ne12);
  5345. GGML_ASSERT(ne3 == ne13);
  5346. const enum ggml_type type = src0->type;
  5347. dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
  5348. quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;
  5349. // we don't support permuted src0 or src1
  5350. GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]);
  5351. GGML_ASSERT(nb10 == sizeof(float));
  5352. // dst cannot be transposed or permuted
  5353. GGML_ASSERT(nb0 <= nb1);
  5354. GGML_ASSERT(nb1 <= nb2);
  5355. GGML_ASSERT(nb2 <= nb3);
  5356. GGML_ASSERT(ggml_is_quantized(src0->type));
  5357. GGML_ASSERT(dst->type == src0->type);
  5358. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  5359. // total rows in src0
  5360. const int nr = ne01*ne02*ne03;
  5361. // rows per thread
  5362. const int dr = (nr + nth - 1)/nth;
  5363. // row range for this thread
  5364. const int ir0 = dr*ith;
  5365. const int ir1 = MIN(ir0 + dr, nr);
  5366. float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  5367. for (int ir = ir0; ir < ir1; ++ir) {
  5368. // src0 indices
  5369. const int i03 = ir/(ne02*ne01);
  5370. const int i02 = (ir - i03*ne02*ne01)/ne01;
  5371. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  5372. // src1 and dst are same shape as src0 => same indices
  5373. const int i13 = i03;
  5374. const int i12 = i02;
  5375. const int i11 = i01;
  5376. const int i3 = i03;
  5377. const int i2 = i02;
  5378. const int i1 = i01;
  5379. void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  5380. float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
assert(ne00 % 32 == 0);
// dequantize row from src0 to temp buffer
  5384. dequantize_row_q(src0_row, wdata, ne00);
  5385. // add src1
  5386. ggml_vec_acc_f32(ne00, wdata, src1_row);
  5387. // quantize row to dst
  5388. quantize_row_q(wdata, dst_row, ne00);
  5389. }
  5390. }
  5391. static void ggml_compute_forward_add(
  5392. const struct ggml_compute_params * params,
  5393. const struct ggml_tensor * src0,
  5394. const struct ggml_tensor * src1,
  5395. struct ggml_tensor * dst) {
  5396. switch (src0->type) {
  5397. case GGML_TYPE_F32:
  5398. {
  5399. ggml_compute_forward_add_f32(params, src0, src1, dst);
  5400. } break;
  5401. case GGML_TYPE_F16:
  5402. {
  5403. if (src1->type == GGML_TYPE_F16) {
  5404. ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
  5405. }
  5406. else if (src1->type == GGML_TYPE_F32) {
  5407. ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
  5408. }
  5409. else {
  5410. GGML_ASSERT(false);
  5411. }
  5412. } break;
  5413. case GGML_TYPE_Q4_0:
  5414. case GGML_TYPE_Q4_1:
  5415. case GGML_TYPE_Q4_2:
  5416. {
  5417. ggml_compute_forward_add_q_f32(params, src0, src1, dst);
  5418. } break;
  5419. default:
  5420. {
  5421. GGML_ASSERT(false);
  5422. } break;
  5423. }
  5424. }
  5425. // ggml_compute_forward_sub
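//
// sub, mul and div below share the same pattern: single-threaded (params->ith must be 0),
// f32 only, one ggml_vec_*_f32 call per row.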
  5426. static void ggml_compute_forward_sub_f32(
  5427. const struct ggml_compute_params * params,
  5428. const struct ggml_tensor * src0,
  5429. const struct ggml_tensor * src1,
  5430. struct ggml_tensor * dst) {
  5431. assert(params->ith == 0);
  5432. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5433. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5434. return;
  5435. }
  5436. const int n = ggml_nrows(src0);
  5437. const int nc = src0->ne[0];
  5438. assert( dst->nb[0] == sizeof(float));
  5439. assert(src0->nb[0] == sizeof(float));
  5440. assert(src1->nb[0] == sizeof(float));
  5441. for (int i = 0; i < n; i++) {
  5442. ggml_vec_sub_f32(nc,
  5443. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5444. (float *) ((char *) src0->data + i*(src0->nb[1])),
  5445. (float *) ((char *) src1->data + i*(src1->nb[1])));
  5446. }
  5447. }
  5448. static void ggml_compute_forward_sub(
  5449. const struct ggml_compute_params * params,
  5450. const struct ggml_tensor * src0,
  5451. const struct ggml_tensor * src1,
  5452. struct ggml_tensor * dst) {
  5453. switch (src0->type) {
  5454. case GGML_TYPE_F32:
  5455. {
  5456. ggml_compute_forward_sub_f32(params, src0, src1, dst);
  5457. } break;
  5458. default:
  5459. {
  5460. GGML_ASSERT(false);
  5461. } break;
  5462. }
  5463. }
  5464. // ggml_compute_forward_mul
  5465. static void ggml_compute_forward_mul_f32(
  5466. const struct ggml_compute_params * params,
  5467. const struct ggml_tensor * src0,
  5468. const struct ggml_tensor * src1,
  5469. struct ggml_tensor * dst) {
  5470. assert(params->ith == 0);
  5471. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5472. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5473. return;
  5474. }
  5475. const int n = ggml_nrows(src0);
  5476. const int nc = src0->ne[0];
  5477. assert( dst->nb[0] == sizeof(float));
  5478. assert(src0->nb[0] == sizeof(float));
  5479. assert(src1->nb[0] == sizeof(float));
  5480. for (int i = 0; i < n; i++) {
  5481. ggml_vec_mul_f32(nc,
  5482. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5483. (float *) ((char *) src0->data + i*(src0->nb[1])),
  5484. (float *) ((char *) src1->data + i*(src1->nb[1])));
  5485. }
  5486. }
  5487. static void ggml_compute_forward_mul(
  5488. const struct ggml_compute_params * params,
  5489. const struct ggml_tensor * src0,
  5490. const struct ggml_tensor * src1,
  5491. struct ggml_tensor * dst) {
  5492. switch (src0->type) {
  5493. case GGML_TYPE_F32:
  5494. {
  5495. ggml_compute_forward_mul_f32(params, src0, src1, dst);
  5496. } break;
  5497. default:
  5498. {
  5499. GGML_ASSERT(false);
  5500. } break;
  5501. }
  5502. }
  5503. // ggml_compute_forward_div
  5504. static void ggml_compute_forward_div_f32(
  5505. const struct ggml_compute_params * params,
  5506. const struct ggml_tensor * src0,
  5507. const struct ggml_tensor * src1,
  5508. struct ggml_tensor * dst) {
  5509. assert(params->ith == 0);
  5510. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5511. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5512. return;
  5513. }
  5514. const int n = ggml_nrows(src0);
  5515. const int nc = src0->ne[0];
  5516. assert( dst->nb[0] == sizeof(float));
  5517. assert(src0->nb[0] == sizeof(float));
  5518. assert(src1->nb[0] == sizeof(float));
  5519. for (int i = 0; i < n; i++) {
  5520. ggml_vec_div_f32(nc,
  5521. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5522. (float *) ((char *) src0->data + i*(src0->nb[1])),
  5523. (float *) ((char *) src1->data + i*(src1->nb[1])));
  5524. }
  5525. }
  5526. static void ggml_compute_forward_div(
  5527. const struct ggml_compute_params * params,
  5528. const struct ggml_tensor * src0,
  5529. const struct ggml_tensor * src1,
  5530. struct ggml_tensor * dst) {
  5531. switch (src0->type) {
  5532. case GGML_TYPE_F32:
  5533. {
  5534. ggml_compute_forward_div_f32(params, src0, src1, dst);
  5535. } break;
  5536. default:
  5537. {
  5538. GGML_ASSERT(false);
  5539. } break;
  5540. }
  5541. }
  5542. // ggml_compute_forward_sqr
  5543. static void ggml_compute_forward_sqr_f32(
  5544. const struct ggml_compute_params * params,
  5545. const struct ggml_tensor * src0,
  5546. struct ggml_tensor * dst) {
  5547. assert(params->ith == 0);
  5548. assert(ggml_are_same_shape(src0, dst));
  5549. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5550. return;
  5551. }
  5552. const int n = ggml_nrows(src0);
  5553. const int nc = src0->ne[0];
  5554. assert( dst->nb[0] == sizeof(float));
  5555. assert(src0->nb[0] == sizeof(float));
  5556. for (int i = 0; i < n; i++) {
  5557. ggml_vec_sqr_f32(nc,
  5558. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5559. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5560. }
  5561. }
  5562. static void ggml_compute_forward_sqr(
  5563. const struct ggml_compute_params * params,
  5564. const struct ggml_tensor * src0,
  5565. struct ggml_tensor * dst) {
  5566. switch (src0->type) {
  5567. case GGML_TYPE_F32:
  5568. {
  5569. ggml_compute_forward_sqr_f32(params, src0, dst);
  5570. } break;
  5571. default:
  5572. {
  5573. GGML_ASSERT(false);
  5574. } break;
  5575. }
  5576. }
  5577. // ggml_compute_forward_sqrt
  5578. static void ggml_compute_forward_sqrt_f32(
  5579. const struct ggml_compute_params * params,
  5580. const struct ggml_tensor * src0,
  5581. struct ggml_tensor * dst) {
  5582. assert(params->ith == 0);
  5583. assert(ggml_are_same_shape(src0, dst));
  5584. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5585. return;
  5586. }
  5587. const int n = ggml_nrows(src0);
  5588. const int nc = src0->ne[0];
  5589. assert( dst->nb[0] == sizeof(float));
  5590. assert(src0->nb[0] == sizeof(float));
  5591. for (int i = 0; i < n; i++) {
  5592. ggml_vec_sqrt_f32(nc,
  5593. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5594. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5595. }
  5596. }
  5597. static void ggml_compute_forward_sqrt(
  5598. const struct ggml_compute_params * params,
  5599. const struct ggml_tensor * src0,
  5600. struct ggml_tensor * dst) {
  5601. switch (src0->type) {
  5602. case GGML_TYPE_F32:
  5603. {
  5604. ggml_compute_forward_sqrt_f32(params, src0, dst);
  5605. } break;
  5606. default:
  5607. {
  5608. GGML_ASSERT(false);
  5609. } break;
  5610. }
  5611. }
  5612. // ggml_compute_forward_sum
  5613. static void ggml_compute_forward_sum_f32(
  5614. const struct ggml_compute_params * params,
  5615. const struct ggml_tensor * src0,
  5616. struct ggml_tensor * dst) {
  5617. assert(params->ith == 0);
  5618. assert(ggml_is_scalar(dst));
  5619. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5620. return;
  5621. }
  5622. assert(ggml_is_scalar(dst));
  5623. assert(src0->nb[0] == sizeof(float));
  5624. const int64_t ne00 = src0->ne[0];
  5625. const int64_t ne01 = src0->ne[1];
  5626. const int64_t ne02 = src0->ne[2];
  5627. const int64_t ne03 = src0->ne[3];
  5628. const size_t nb01 = src0->nb[1];
  5629. const size_t nb02 = src0->nb[2];
  5630. const size_t nb03 = src0->nb[3];
  5631. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5632. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5633. for (int64_t i01 = 0; i01 < ne01; i01++) {
  5634. ggml_vec_sum_f32(ne00,
  5635. (float *) (dst->data),
  5636. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  5637. }
  5638. }
  5639. }
  5640. }
  5641. static void ggml_compute_forward_sum(
  5642. const struct ggml_compute_params * params,
  5643. const struct ggml_tensor * src0,
  5644. struct ggml_tensor * dst) {
  5645. switch (src0->type) {
  5646. case GGML_TYPE_F32:
  5647. {
  5648. ggml_compute_forward_sum_f32(params, src0, dst);
  5649. } break;
  5650. default:
  5651. {
  5652. GGML_ASSERT(false);
  5653. } break;
  5654. }
  5655. }
  5656. // ggml_compute_forward_mean
  5657. static void ggml_compute_forward_mean_f32(
  5658. const struct ggml_compute_params * params,
  5659. const struct ggml_tensor * src0,
  5660. struct ggml_tensor * dst) {
  5661. assert(params->ith == 0);
  5662. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5663. return;
  5664. }
  5665. assert(src0->nb[0] == sizeof(float));
  5666. const int64_t ne00 = src0->ne[0];
  5667. const int64_t ne01 = src0->ne[1];
  5668. const int64_t ne02 = src0->ne[2];
  5669. const int64_t ne03 = src0->ne[3];
  5670. const size_t nb01 = src0->nb[1];
  5671. const size_t nb02 = src0->nb[2];
  5672. const size_t nb03 = src0->nb[3];
  5673. const int64_t ne0 = dst->ne[0];
  5674. const int64_t ne1 = dst->ne[1];
  5675. const int64_t ne2 = dst->ne[2];
  5676. const int64_t ne3 = dst->ne[3];
  5677. assert(ne0 == 1);
  5678. assert(ne1 == ne01);
  5679. assert(ne2 == ne02);
  5680. assert(ne3 == ne03);
  5681. UNUSED(ne0);
  5682. UNUSED(ne1);
  5683. UNUSED(ne2);
  5684. UNUSED(ne3);
  5685. const size_t nb1 = dst->nb[1];
  5686. const size_t nb2 = dst->nb[2];
  5687. const size_t nb3 = dst->nb[3];
  5688. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5689. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5690. for (int64_t i01 = 0; i01 < ne01; i01++) {
  5691. ggml_vec_sum_f32(ne00,
  5692. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5693. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  5694. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  5695. }
  5696. }
  5697. }
  5698. }
  5699. static void ggml_compute_forward_mean(
  5700. const struct ggml_compute_params * params,
  5701. const struct ggml_tensor * src0,
  5702. struct ggml_tensor * dst) {
  5703. switch (src0->type) {
  5704. case GGML_TYPE_F32:
  5705. {
  5706. ggml_compute_forward_mean_f32(params, src0, dst);
  5707. } break;
  5708. default:
  5709. {
  5710. GGML_ASSERT(false);
  5711. } break;
  5712. }
  5713. }
  5714. // ggml_compute_forward_repeat
  5715. static void ggml_compute_forward_repeat_f32(
  5716. const struct ggml_compute_params * params,
  5717. const struct ggml_tensor * src0,
  5718. struct ggml_tensor * dst) {
  5719. assert(params->ith == 0);
  5720. assert(ggml_can_repeat(src0, dst));
  5721. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5722. return;
  5723. }
  5724. // TODO: implement support for rank > 2 tensors
  5725. assert(src0->ne[2] == 1);
  5726. assert(src0->ne[3] == 1);
  5727. assert( dst->ne[2] == 1);
  5728. assert( dst->ne[3] == 1);
  5729. const int nc = dst->ne[0];
  5730. const int nr = dst->ne[1];
  5731. const int nc0 = src0->ne[0];
  5732. const int nr0 = src0->ne[1];
  5733. const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat
  5734. const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat
  5735. // TODO: support for transposed / permuted tensors
  5736. assert( dst->nb[0] == sizeof(float));
  5737. assert(src0->nb[0] == sizeof(float));
  5738. // TODO: maybe this is not optimal?
  5739. for (int i = 0; i < nrr; i++) {
  5740. for (int j = 0; j < ncr; j++) {
  5741. for (int k = 0; k < nr0; k++) {
  5742. ggml_vec_cpy_f32(nc0,
  5743. (float *) ((char *) dst->data + (i*nr0 + k)*( dst->nb[1]) + j*nc0*( dst->nb[0])),
  5744. (float *) ((char *) src0->data + ( k)*(src0->nb[1])));
  5745. }
  5746. }
  5747. }
  5748. }
  5749. static void ggml_compute_forward_repeat(
  5750. const struct ggml_compute_params * params,
  5751. const struct ggml_tensor * src0,
  5752. struct ggml_tensor * dst) {
  5753. switch (src0->type) {
  5754. case GGML_TYPE_F32:
  5755. {
  5756. ggml_compute_forward_repeat_f32(params, src0, dst);
  5757. } break;
  5758. default:
  5759. {
  5760. GGML_ASSERT(false);
  5761. } break;
  5762. }
  5763. }
  5764. // ggml_compute_forward_abs
  5765. static void ggml_compute_forward_abs_f32(
  5766. const struct ggml_compute_params * params,
  5767. const struct ggml_tensor * src0,
  5768. struct ggml_tensor * dst) {
  5769. assert(params->ith == 0);
  5770. assert(ggml_are_same_shape(src0, dst));
  5771. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5772. return;
  5773. }
  5774. const int n = ggml_nrows(src0);
  5775. const int nc = src0->ne[0];
  5776. assert(dst->nb[0] == sizeof(float));
  5777. assert(src0->nb[0] == sizeof(float));
  5778. for (int i = 0; i < n; i++) {
  5779. ggml_vec_abs_f32(nc,
  5780. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5781. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5782. }
  5783. }
  5784. static void ggml_compute_forward_abs(
  5785. const struct ggml_compute_params * params,
  5786. const struct ggml_tensor * src0,
  5787. struct ggml_tensor * dst) {
  5788. switch (src0->type) {
  5789. case GGML_TYPE_F32:
  5790. {
  5791. ggml_compute_forward_abs_f32(params, src0, dst);
  5792. } break;
  5793. default:
  5794. {
  5795. GGML_ASSERT(false);
  5796. } break;
  5797. }
  5798. }
  5799. // ggml_compute_forward_sgn
  5800. static void ggml_compute_forward_sgn_f32(
  5801. const struct ggml_compute_params * params,
  5802. const struct ggml_tensor * src0,
  5803. struct ggml_tensor * dst) {
  5804. assert(params->ith == 0);
  5805. assert(ggml_are_same_shape(src0, dst));
  5806. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5807. return;
  5808. }
  5809. const int n = ggml_nrows(src0);
  5810. const int nc = src0->ne[0];
  5811. assert(dst->nb[0] == sizeof(float));
  5812. assert(src0->nb[0] == sizeof(float));
  5813. for (int i = 0; i < n; i++) {
  5814. ggml_vec_sgn_f32(nc,
  5815. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5816. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5817. }
  5818. }
  5819. static void ggml_compute_forward_sgn(
  5820. const struct ggml_compute_params * params,
  5821. const struct ggml_tensor * src0,
  5822. struct ggml_tensor * dst) {
  5823. switch (src0->type) {
  5824. case GGML_TYPE_F32:
  5825. {
  5826. ggml_compute_forward_sgn_f32(params, src0, dst);
  5827. } break;
  5828. default:
  5829. {
  5830. GGML_ASSERT(false);
  5831. } break;
  5832. }
  5833. }
  5834. // ggml_compute_forward_neg
  5835. static void ggml_compute_forward_neg_f32(
  5836. const struct ggml_compute_params * params,
  5837. const struct ggml_tensor * src0,
  5838. struct ggml_tensor * dst) {
  5839. assert(params->ith == 0);
  5840. assert(ggml_are_same_shape(src0, dst));
  5841. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5842. return;
  5843. }
  5844. const int n = ggml_nrows(src0);
  5845. const int nc = src0->ne[0];
  5846. assert(dst->nb[0] == sizeof(float));
  5847. assert(src0->nb[0] == sizeof(float));
  5848. for (int i = 0; i < n; i++) {
  5849. ggml_vec_neg_f32(nc,
  5850. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5851. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5852. }
  5853. }
  5854. static void ggml_compute_forward_neg(
  5855. const struct ggml_compute_params * params,
  5856. const struct ggml_tensor * src0,
  5857. struct ggml_tensor * dst) {
  5858. switch (src0->type) {
  5859. case GGML_TYPE_F32:
  5860. {
  5861. ggml_compute_forward_neg_f32(params, src0, dst);
  5862. } break;
  5863. default:
  5864. {
  5865. GGML_ASSERT(false);
  5866. } break;
  5867. }
  5868. }
  5869. // ggml_compute_forward_step
  5870. static void ggml_compute_forward_step_f32(
  5871. const struct ggml_compute_params * params,
  5872. const struct ggml_tensor * src0,
  5873. struct ggml_tensor * dst) {
  5874. assert(params->ith == 0);
  5875. assert(ggml_are_same_shape(src0, dst));
  5876. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5877. return;
  5878. }
  5879. const int n = ggml_nrows(src0);
  5880. const int nc = src0->ne[0];
  5881. assert(dst->nb[0] == sizeof(float));
  5882. assert(src0->nb[0] == sizeof(float));
  5883. for (int i = 0; i < n; i++) {
  5884. ggml_vec_step_f32(nc,
  5885. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5886. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5887. }
  5888. }
  5889. static void ggml_compute_forward_step(
  5890. const struct ggml_compute_params * params,
  5891. const struct ggml_tensor * src0,
  5892. struct ggml_tensor * dst) {
  5893. switch (src0->type) {
  5894. case GGML_TYPE_F32:
  5895. {
  5896. ggml_compute_forward_step_f32(params, src0, dst);
  5897. } break;
  5898. default:
  5899. {
  5900. GGML_ASSERT(false);
  5901. } break;
  5902. }
  5903. }
  5904. // ggml_compute_forward_relu
  5905. static void ggml_compute_forward_relu_f32(
  5906. const struct ggml_compute_params * params,
  5907. const struct ggml_tensor * src0,
  5908. struct ggml_tensor * dst) {
  5909. assert(params->ith == 0);
  5910. assert(ggml_are_same_shape(src0, dst));
  5911. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5912. return;
  5913. }
  5914. const int n = ggml_nrows(src0);
  5915. const int nc = src0->ne[0];
  5916. assert(dst->nb[0] == sizeof(float));
  5917. assert(src0->nb[0] == sizeof(float));
  5918. for (int i = 0; i < n; i++) {
  5919. ggml_vec_relu_f32(nc,
  5920. (float *) ((char *) dst->data + i*( dst->nb[1])),
  5921. (float *) ((char *) src0->data + i*(src0->nb[1])));
  5922. }
  5923. }
  5924. static void ggml_compute_forward_relu(
  5925. const struct ggml_compute_params * params,
  5926. const struct ggml_tensor * src0,
  5927. struct ggml_tensor * dst) {
  5928. switch (src0->type) {
  5929. case GGML_TYPE_F32:
  5930. {
  5931. ggml_compute_forward_relu_f32(params, src0, dst);
  5932. } break;
  5933. default:
  5934. {
  5935. GGML_ASSERT(false);
  5936. } break;
  5937. }
  5938. }
  5939. // ggml_compute_forward_gelu
  5940. static void ggml_compute_forward_gelu_f32(
  5941. const struct ggml_compute_params * params,
  5942. const struct ggml_tensor * src0,
  5943. struct ggml_tensor * dst) {
  5944. GGML_ASSERT(ggml_is_contiguous(src0));
  5945. GGML_ASSERT(ggml_is_contiguous(dst));
  5946. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  5947. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5948. return;
  5949. }
  5950. const int ith = params->ith;
  5951. const int nth = params->nth;
  5952. const int nc = src0->ne[0];
  5953. const int nr = ggml_nrows(src0);
  5954. // rows per thread
  5955. const int dr = (nr + nth - 1)/nth;
  5956. // row range for this thread
  5957. const int ir0 = dr*ith;
  5958. const int ir1 = MIN(ir0 + dr, nr);
  5959. for (int i1 = ir0; i1 < ir1; i1++) {
  5960. ggml_vec_gelu_f32(nc,
  5961. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  5962. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  5963. #ifndef NDEBUG
  5964. for (int k = 0; k < nc; k++) {
  5965. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  5966. UNUSED(x);
  5967. assert(!isnan(x));
  5968. assert(!isinf(x));
  5969. }
  5970. #endif
  5971. }
  5972. }
  5973. static void ggml_compute_forward_gelu(
  5974. const struct ggml_compute_params * params,
  5975. const struct ggml_tensor * src0,
  5976. struct ggml_tensor * dst) {
  5977. switch (src0->type) {
  5978. case GGML_TYPE_F32:
  5979. {
  5980. ggml_compute_forward_gelu_f32(params, src0, dst);
  5981. } break;
  5982. default:
  5983. {
  5984. GGML_ASSERT(false);
  5985. } break;
  5986. }
  5987. //printf("XXXXXXXX gelu\n");
  5988. }
  5989. // ggml_compute_forward_silu
  5990. static void ggml_compute_forward_silu_f32(
  5991. const struct ggml_compute_params * params,
  5992. const struct ggml_tensor * src0,
  5993. struct ggml_tensor * dst) {
  5994. GGML_ASSERT(ggml_is_contiguous(src0));
  5995. GGML_ASSERT(ggml_is_contiguous(dst));
  5996. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  5997. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5998. return;
  5999. }
  6000. const int ith = params->ith;
  6001. const int nth = params->nth;
  6002. const int nc = src0->ne[0];
  6003. const int nr = ggml_nrows(src0);
  6004. // rows per thread
  6005. const int dr = (nr + nth - 1)/nth;
  6006. // row range for this thread
  6007. const int ir0 = dr*ith;
  6008. const int ir1 = MIN(ir0 + dr, nr);
  6009. for (int i1 = ir0; i1 < ir1; i1++) {
  6010. ggml_vec_silu_f32(nc,
  6011. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  6012. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  6013. #ifndef NDEBUG
  6014. for (int k = 0; k < nc; k++) {
  6015. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  6016. UNUSED(x);
  6017. assert(!isnan(x));
  6018. assert(!isinf(x));
  6019. }
  6020. #endif
  6021. }
  6022. }
  6023. static void ggml_compute_forward_silu(
  6024. const struct ggml_compute_params * params,
  6025. const struct ggml_tensor * src0,
  6026. struct ggml_tensor * dst) {
  6027. switch (src0->type) {
  6028. case GGML_TYPE_F32:
  6029. {
  6030. ggml_compute_forward_silu_f32(params, src0, dst);
  6031. } break;
  6032. default:
  6033. {
  6034. GGML_ASSERT(false);
  6035. } break;
  6036. }
  6037. }
  6038. // ggml_compute_forward_norm
  6039. static void ggml_compute_forward_norm_f32(
  6040. const struct ggml_compute_params * params,
  6041. const struct ggml_tensor * src0,
  6042. struct ggml_tensor * dst) {
  6043. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6044. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6045. return;
  6046. }
  6047. GGML_ASSERT(src0->nb[0] == sizeof(float));
  6048. const int ith = params->ith;
  6049. const int nth = params->nth;
  6050. const int64_t ne00 = src0->ne[0];
  6051. const int64_t ne01 = src0->ne[1];
  6052. const int64_t ne02 = src0->ne[2];
  6053. const int64_t ne03 = src0->ne[3];
  6054. const size_t nb01 = src0->nb[1];
  6055. const size_t nb02 = src0->nb[2];
  6056. const size_t nb03 = src0->nb[3];
  6057. const size_t nb1 = dst->nb[1];
  6058. const size_t nb2 = dst->nb[2];
  6059. const size_t nb3 = dst->nb[3];
  6060. const float eps = 1e-5f; // TODO: make this a parameter
  6061. // TODO: optimize
  6062. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6063. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6064. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  6065. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6066. ggml_float sum = 0.0;
  6067. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6068. sum += (ggml_float)x[i00];
  6069. }
  6070. float mean = sum/ne00;
  6071. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  6072. ggml_float sum2 = 0.0;
  6073. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6074. float v = x[i00] - mean;
  6075. y[i00] = v;
  6076. sum2 += (ggml_float)(v*v);
  6077. }
  6078. float variance = sum2/ne00;
  6079. const float scale = 1.0f/sqrtf(variance + eps);
  6080. ggml_vec_scale_f32(ne00, y, scale);
  6081. }
  6082. }
  6083. }
  6084. }
  6085. static void ggml_compute_forward_norm(
  6086. const struct ggml_compute_params * params,
  6087. const struct ggml_tensor * src0,
  6088. struct ggml_tensor * dst) {
  6089. switch (src0->type) {
  6090. case GGML_TYPE_F32:
  6091. {
  6092. ggml_compute_forward_norm_f32(params, src0, dst);
  6093. } break;
  6094. default:
  6095. {
  6096. GGML_ASSERT(false);
  6097. } break;
  6098. }
  6099. }
  6100. static void ggml_compute_forward_rms_norm_f32(
  6101. const struct ggml_compute_params * params,
  6102. const struct ggml_tensor * src0,
  6103. struct ggml_tensor * dst) {
  6104. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6105. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6106. return;
  6107. }
  6108. GGML_ASSERT(src0->nb[0] == sizeof(float));
  6109. const int ith = params->ith;
  6110. const int nth = params->nth;
  6111. const int64_t ne00 = src0->ne[0];
  6112. const int64_t ne01 = src0->ne[1];
  6113. const int64_t ne02 = src0->ne[2];
  6114. const int64_t ne03 = src0->ne[3];
  6115. const size_t nb01 = src0->nb[1];
  6116. const size_t nb02 = src0->nb[2];
  6117. const size_t nb03 = src0->nb[3];
  6118. const size_t nb1 = dst->nb[1];
  6119. const size_t nb2 = dst->nb[2];
  6120. const size_t nb3 = dst->nb[3];
  6121. const float eps = 1e-6f; // TODO: make this a parameter
  6122. // TODO: optimize
  6123. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6124. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6125. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  6126. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6127. ggml_float sum = 0.0;
  6128. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6129. sum += (ggml_float)(x[i00] * x[i00]);
  6130. }
  6131. float mean = sum/ne00;
  6132. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  6133. memcpy(y, x, ne00 * sizeof(float));
  6134. // for (int i00 = 0; i00 < ne00; i00++) {
  6135. // y[i00] = x[i00];
  6136. // }
  6137. const float scale = 1.0f/sqrtf(mean + eps);
  6138. ggml_vec_scale_f32(ne00, y, scale);
  6139. }
  6140. }
  6141. }
  6142. }
  6143. static void ggml_compute_forward_rms_norm(
  6144. const struct ggml_compute_params * params,
  6145. const struct ggml_tensor * src0,
  6146. struct ggml_tensor * dst) {
  6147. switch (src0->type) {
  6148. case GGML_TYPE_F32:
  6149. {
  6150. ggml_compute_forward_rms_norm_f32(params, src0, dst);
  6151. } break;
  6152. default:
  6153. {
  6154. GGML_ASSERT(false);
  6155. } break;
  6156. }
  6157. }
  6158. // ggml_compute_forward_mul_mat
  6159. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  6160. // helper function to determine if it is better to use BLAS or not
  6161. // for large matrices, BLAS is faster
  6162. static bool ggml_compute_forward_mul_mat_use_blas(
  6163. const struct ggml_tensor * src0,
  6164. const struct ggml_tensor * src1,
  6165. struct ggml_tensor * dst) {
  6166. //const int64_t ne00 = src0->ne[0];
  6167. //const int64_t ne01 = src0->ne[1];
  6168. const int64_t ne10 = src1->ne[0];
  6169. const int64_t ne0 = dst->ne[0];
  6170. const int64_t ne1 = dst->ne[1];
  6171. // TODO: find the optimal values for these
  6172. if (ggml_is_contiguous(src0) &&
  6173. ggml_is_contiguous(src1) && ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) {
  6174. /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
  6175. return true;
  6176. }
  6177. return false;
  6178. }
  6179. #endif
  6180. static void ggml_compute_forward_mul_mat_f32(
  6181. const struct ggml_compute_params * params,
  6182. const struct ggml_tensor * src0,
  6183. const struct ggml_tensor * src1,
  6184. struct ggml_tensor * dst) {
  6185. int64_t t0 = ggml_perf_time_us();
  6186. UNUSED(t0);
  6187. const int64_t ne00 = src0->ne[0];
  6188. const int64_t ne01 = src0->ne[1];
  6189. const int64_t ne02 = src0->ne[2];
  6190. const int64_t ne03 = src0->ne[3];
  6191. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  6192. const int64_t ne10 = src1->ne[0];
  6193. #endif
  6194. const int64_t ne11 = src1->ne[1];
  6195. #ifndef NDEBUG
  6196. const int64_t ne12 = src1->ne[2];
  6197. const int64_t ne13 = src1->ne[3];
  6198. const int64_t ne0 = dst->ne[0];
  6199. const int64_t ne1 = dst->ne[1];
  6200. const int64_t ne2 = dst->ne[2];
  6201. const int64_t ne3 = dst->ne[3];
  6202. const int nb00 = src0->nb[0];
  6203. #endif
  6204. const int nb01 = src0->nb[1];
  6205. const int nb02 = src0->nb[2];
  6206. const int nb03 = src0->nb[3];
  6207. #ifndef NDEBUG
  6208. const int nb10 = src1->nb[0];
  6209. #endif
  6210. const int nb11 = src1->nb[1];
  6211. const int nb12 = src1->nb[2];
  6212. const int nb13 = src1->nb[3];
  6213. const int nb0 = dst->nb[0];
  6214. const int nb1 = dst->nb[1];
  6215. const int nb2 = dst->nb[2];
  6216. const int nb3 = dst->nb[3];
  6217. const int ith = params->ith;
  6218. const int nth = params->nth;
  6219. assert(ne02 == ne12);
  6220. assert(ne03 == ne13);
  6221. assert(ne2 == ne12);
  6222. assert(ne3 == ne13);
  6223. // we don't support permuted src0 or src1
  6224. assert(nb00 == sizeof(float));
  6225. assert(nb10 == sizeof(float));
  6226. // dst cannot be transposed or permuted
  6227. assert(nb0 == sizeof(float));
  6228. assert(nb0 <= nb1);
  6229. assert(nb1 <= nb2);
  6230. assert(nb2 <= nb3);
  6231. assert(ne0 == ne01);
  6232. assert(ne1 == ne11);
  6233. assert(ne2 == ne02);
  6234. assert(ne3 == ne03);
  6235. // nb01 >= nb00 - src0 is not transposed
  6236. // compute by src0 rows
  6237. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  6238. if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
  6239. if (params->ith != 0) {
  6240. return;
  6241. }
  6242. if (params->type == GGML_TASK_INIT) {
  6243. return;
  6244. }
  6245. if (params->type == GGML_TASK_FINALIZE) {
  6246. return;
  6247. }
  6248. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6249. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6250. const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
  6251. const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
  6252. float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
  6253. // zT = y * xT
  6254. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  6255. ne11, ne01, ne10,
  6256. 1.0f, y, ne10,
  6257. x, ne00,
  6258. 0.0f, d, ne01);
  6259. }
  6260. }
  6261. //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  6262. return;
  6263. }
  6264. #endif
  6265. if (params->type == GGML_TASK_INIT) {
  6266. return;
  6267. }
  6268. if (params->type == GGML_TASK_FINALIZE) {
  6269. return;
  6270. }
  6271. // parallelize by src0 rows using ggml_vec_dot_f32
  6272. // total rows in src0
  6273. const int nr = ne01*ne02*ne03;
  6274. // rows per thread
  6275. const int dr = (nr + nth - 1)/nth;
  6276. // row range for this thread
  6277. const int ir0 = dr*ith;
  6278. const int ir1 = MIN(ir0 + dr, nr);
  6279. for (int ir = ir0; ir < ir1; ++ir) {
  6280. // src0 indices
  6281. const int i03 = ir/(ne02*ne01);
  6282. const int i02 = (ir - i03*ne02*ne01)/ne01;
  6283. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6284. for (int64_t ic = 0; ic < ne11; ++ic) {
  6285. // src1 indices
  6286. const int i13 = i03;
  6287. const int i12 = i02;
  6288. const int i11 = ic;
  6289. // dst indices
  6290. const int i0 = i01;
  6291. const int i1 = i11;
  6292. const int i2 = i02;
  6293. const int i3 = i03;
  6294. ggml_vec_dot_f32(ne00,
  6295. (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  6296. (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
  6297. (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
  6298. }
  6299. }
  6300. //int64_t t1 = ggml_perf_time_us();
  6301. //static int64_t acc = 0;
  6302. //acc += t1 - t0;
  6303. //if (t1 - t0 > 10) {
  6304. // printf("\n");
  6305. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  6306. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  6307. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  6308. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  6309. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  6310. //}
  6311. }
  6312. static void ggml_compute_forward_mul_mat_f16_f32(
  6313. const struct ggml_compute_params * params,
  6314. const struct ggml_tensor * src0,
  6315. const struct ggml_tensor * src1,
  6316. struct ggml_tensor * dst) {
  6317. int64_t t0 = ggml_perf_time_us();
  6318. UNUSED(t0);
  6319. const int64_t ne00 = src0->ne[0];
  6320. const int64_t ne01 = src0->ne[1];
  6321. const int64_t ne02 = src0->ne[2];
  6322. const int64_t ne03 = src0->ne[3];
  6323. const int64_t ne10 = src1->ne[0];
  6324. const int64_t ne11 = src1->ne[1];
  6325. const int64_t ne12 = src1->ne[2];
  6326. const int64_t ne13 = src1->ne[3];
  6327. const int64_t ne0 = dst->ne[0];
  6328. const int64_t ne1 = dst->ne[1];
  6329. const int64_t ne2 = dst->ne[2];
  6330. const int64_t ne3 = dst->ne[3];
  6331. //const int64_t ne = ne0*ne1*ne2*ne3;
  6332. const int nb00 = src0->nb[0];
  6333. const int nb01 = src0->nb[1];
  6334. const int nb02 = src0->nb[2];
  6335. const int nb03 = src0->nb[3];
  6336. const int nb10 = src1->nb[0];
  6337. const int nb11 = src1->nb[1];
  6338. const int nb12 = src1->nb[2];
  6339. const int nb13 = src1->nb[3];
  6340. const int nb0 = dst->nb[0];
  6341. const int nb1 = dst->nb[1];
  6342. const int nb2 = dst->nb[2];
  6343. const int nb3 = dst->nb[3];
  6344. const int ith = params->ith;
  6345. const int nth = params->nth;
  6346. GGML_ASSERT(ne02 == ne12);
  6347. GGML_ASSERT(ne03 == ne13);
  6348. GGML_ASSERT(ne2 == ne12);
  6349. GGML_ASSERT(ne3 == ne13);
  6350. // TODO: we don't support permuted src0
  6351. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6352. // dst cannot be transposed or permuted
  6353. GGML_ASSERT(nb0 == sizeof(float));
  6354. GGML_ASSERT(nb0 <= nb1);
  6355. GGML_ASSERT(nb1 <= nb2);
  6356. GGML_ASSERT(nb2 <= nb3);
  6357. GGML_ASSERT(ne0 == ne01);
  6358. GGML_ASSERT(ne1 == ne11);
  6359. GGML_ASSERT(ne2 == ne02);
  6360. GGML_ASSERT(ne3 == ne03);
  6361. // nb01 >= nb00 - src0 is not transposed
  6362. // compute by src0 rows
  6363. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  6364. if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
  6365. GGML_ASSERT(nb10 == sizeof(float));
  6366. if (params->ith != 0) {
  6367. return;
  6368. }
  6369. if (params->type == GGML_TASK_INIT) {
  6370. return;
  6371. }
  6372. if (params->type == GGML_TASK_FINALIZE) {
  6373. return;
  6374. }
  6375. float * const wdata = params->wdata;
  6376. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6377. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6378. {
  6379. size_t id = 0;
  6380. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  6381. for (int64_t i00 = 0; i00 < ne00; ++i00) {
  6382. wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00));
  6383. }
  6384. }
  6385. }
  6386. const float * x = wdata;
  6387. const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
  6388. float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
  6389. // zT = y * xT
  6390. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  6391. ne11, ne01, ne10,
  6392. 1.0f, y, ne10,
  6393. x, ne00,
  6394. 0.0f, d, ne01);
  6395. }
  6396. }
  6397. /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/
  6398. return;
  6399. }
  6400. #endif
  6401. if (params->type == GGML_TASK_INIT) {
  6402. ggml_fp16_t * const wdata = params->wdata;
  6403. size_t id = 0;
  6404. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  6405. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  6406. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  6407. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  6408. wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
  6409. }
  6410. }
  6411. }
  6412. }
  6413. GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);
  6414. return;
  6415. }
  6416. if (params->type == GGML_TASK_FINALIZE) {
  6417. return;
  6418. }
  6419. // fp16 -> half the size, so divide by 2
  6420. // TODO: do not support transposed src1
  6421. assert(nb10/2 == sizeof(ggml_fp16_t));
  6422. // parallelize by src0 rows using ggml_vec_dot_f16
  6423. // total rows in src0
  6424. const int nr = ne01*ne02*ne03;
  6425. // rows per thread
  6426. const int dr = (nr + nth - 1)/nth;
  6427. // row range for this thread
  6428. const int ir0 = dr*ith;
  6429. const int ir1 = MIN(ir0 + dr, nr);
  6430. ggml_fp16_t * wdata = params->wdata;
  6431. for (int ir = ir0; ir < ir1; ++ir) {
  6432. // src0 indices
  6433. const int i03 = ir/(ne02*ne01);
  6434. const int i02 = (ir - i03*ne02*ne01)/ne01;
  6435. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6436. const int i13 = i03;
  6437. const int i12 = i02;
  6438. const int i0 = i01;
  6439. const int i2 = i02;
  6440. const int i3 = i03;
  6441. ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  6442. ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00;
  6443. float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
  6444. for (int64_t ic = 0; ic < ne11; ++ic) {
  6445. ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00);
  6446. }
  6447. }
  6448. //int64_t t1 = ggml_time_us();
  6449. //static int64_t acc = 0;
  6450. //acc += t1 - t0;
  6451. //if (t1 - t0 > 10) {
  6452. // printf("\n");
  6453. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  6454. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  6455. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  6456. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  6457. //}
  6458. }
  6459. static void ggml_compute_forward_mul_mat_q_f32(
  6460. const struct ggml_compute_params * params,
  6461. const struct ggml_tensor * src0,
  6462. const struct ggml_tensor * src1,
  6463. struct ggml_tensor * dst) {
  6464. int64_t t0 = ggml_perf_time_us();
  6465. UNUSED(t0);
  6466. const int64_t ne00 = src0->ne[0];
  6467. const int64_t ne01 = src0->ne[1];
  6468. const int64_t ne02 = src0->ne[2];
  6469. const int64_t ne03 = src0->ne[3];
  6470. const int64_t ne10 = src1->ne[0];
  6471. const int64_t ne11 = src1->ne[1];
  6472. const int64_t ne12 = src1->ne[2];
  6473. const int64_t ne13 = src1->ne[3];
  6474. const int64_t ne0 = dst->ne[0];
  6475. const int64_t ne1 = dst->ne[1];
  6476. const int64_t ne2 = dst->ne[2];
  6477. const int64_t ne3 = dst->ne[3];
  6478. const int nb00 = src0->nb[0];
  6479. const int nb01 = src0->nb[1];
  6480. const int nb02 = src0->nb[2];
  6481. const int nb03 = src0->nb[3];
  6482. const int nb10 = src1->nb[0];
  6483. const int nb11 = src1->nb[1];
  6484. const int nb12 = src1->nb[2];
  6485. const int nb13 = src1->nb[3];
  6486. const int nb0 = dst->nb[0];
  6487. const int nb1 = dst->nb[1];
  6488. const int nb2 = dst->nb[2];
  6489. const int nb3 = dst->nb[3];
  6490. const int ith = params->ith;
  6491. const int nth = params->nth;
  6492. GGML_ASSERT(ne02 == ne12);
  6493. GGML_ASSERT(ne03 == ne13);
  6494. GGML_ASSERT(ne2 == ne12);
  6495. GGML_ASSERT(ne3 == ne13);
  6496. const enum ggml_type type = src0->type;
  6497. quantize_row_q_t const quantize_row_q_dot = quantize_fns[type].quantize_row_q_dot;
  6498. vec_dot_q_t const vec_dot_q = quantize_fns[type].vec_dot_q;
  6499. // we don't support permuted src0 or src1
  6500. GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]);
  6501. GGML_ASSERT(nb10 == sizeof(float));
  6502. // dst cannot be transposed or permuted
  6503. GGML_ASSERT(nb0 == sizeof(float));
  6504. GGML_ASSERT(nb0 <= nb1);
  6505. GGML_ASSERT(nb1 <= nb2);
  6506. GGML_ASSERT(nb2 <= nb3);
  6507. GGML_ASSERT(ne0 == ne01);
  6508. GGML_ASSERT(ne1 == ne11);
  6509. GGML_ASSERT(ne2 == ne02);
  6510. GGML_ASSERT(ne3 == ne03);
  6511. // nb01 >= nb00 - src0 is not transposed
  6512. // compute by src0 rows
  6513. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  6514. if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
  6515. if (params->ith != 0) {
  6516. return;
  6517. }
  6518. if (params->type == GGML_TASK_INIT) {
  6519. return;
  6520. }
  6521. if (params->type == GGML_TASK_FINALIZE) {
  6522. return;
  6523. }
  6524. float * const wdata = params->wdata;
  6525. dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
  6526. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6527. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6528. {
  6529. size_t id = 0;
  6530. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  6531. dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
  6532. id += ne00;
  6533. }
  6534. }
  6535. const float * x = wdata;
  6536. const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
  6537. float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
  6538. // zT = y * xT
  6539. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  6540. ne11, ne01, ne10,
  6541. 1.0f, y, ne10,
  6542. x, ne00,
  6543. 0.0f, d, ne01);
  6544. }
  6545. }
  6546. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  6547. return;
  6548. }
  6549. #endif
  6550. if (params->type == GGML_TASK_INIT) {
  6551. char * wdata = params->wdata;
  6552. const size_t row_size = ne10*GGML_TYPE_SIZE[GGML_TYPE_Q8_0]/GGML_BLCK_SIZE[GGML_TYPE_Q8_0];
  6553. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  6554. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  6555. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  6556. quantize_row_q_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  6557. wdata += row_size;
  6558. }
  6559. }
  6560. }
  6561. return;
  6562. }
  6563. if (params->type == GGML_TASK_FINALIZE) {
  6564. return;
  6565. }
  6566. // parallelize by src0 rows using ggml_vec_dot_q
  6567. // total rows in src0
  6568. const int nr = ne01*ne02*ne03;
  6569. // rows per thread
  6570. const int dr = (nr + nth - 1)/nth;
  6571. // row range for this thread
  6572. const int ir0 = dr*ith;
  6573. const int ir1 = MIN(ir0 + dr, nr);
  6574. void * wdata = params->wdata;
  6575. const size_t row_size = ne00*GGML_TYPE_SIZE[GGML_TYPE_Q8_0]/GGML_BLCK_SIZE[GGML_TYPE_Q8_0];
  6576. for (int ir = ir0; ir < ir1; ++ir) {
  6577. // src0 indices
  6578. const int i03 = ir/(ne02*ne01);
  6579. const int i02 = (ir - i03*ne02*ne01)/ne01;
  6580. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6581. const int i13 = i03;
  6582. const int i12 = i02;
  6583. const int i0 = i01;
  6584. const int i2 = i02;
  6585. const int i3 = i03;
  6586. void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  6587. char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size));
  6588. float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
  6589. assert(ne00 % 32 == 0);
  6590. for (int64_t ic = 0; ic < ne11; ++ic) {
  6591. vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
  6592. }
  6593. }
  6594. //int64_t t1 = ggml_time_us();
  6595. //static int64_t acc = 0;
  6596. //acc += t1 - t0;
  6597. //if (t1 - t0 > 10) {
  6598. // printf("\n");
  6599. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  6600. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  6601. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  6602. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  6603. //}
  6604. }
  6605. static void ggml_compute_forward_mul_mat(
  6606. const struct ggml_compute_params * params,
  6607. const struct ggml_tensor * src0,
  6608. const struct ggml_tensor * src1,
  6609. struct ggml_tensor * dst) {
  6610. switch (src0->type) {
  6611. case GGML_TYPE_Q4_0:
  6612. case GGML_TYPE_Q4_1:
  6613. case GGML_TYPE_Q4_2:
  6614. case GGML_TYPE_Q8_0:
  6615. {
  6616. ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst);
  6617. } break;
  6618. case GGML_TYPE_F16:
  6619. {
  6620. ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst);
  6621. } break;
  6622. case GGML_TYPE_F32:
  6623. {
  6624. ggml_compute_forward_mul_mat_f32(params, src0, src1, dst);
  6625. } break;
  6626. default:
  6627. {
  6628. GGML_ASSERT(false);
  6629. } break;
  6630. }
  6631. #if 0
  6632. if (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_Q4_1) {
  6633. static int first = 8;
  6634. printf("src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]);
  6635. printf("src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]);
  6636. printf("dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  6637. if (first) {
  6638. --first;
  6639. } else {
  6640. for (int k = 0; k < dst->ne[1]; ++k) {
  6641. for (int j = 0; j < dst->ne[0]/16; ++j) {
  6642. for (int i = 0; i < 16; ++i) {
  6643. printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  6644. }
  6645. printf("\n");
  6646. }
  6647. printf("\n");
  6648. }
  6649. printf("\n");
  6650. exit(0);
  6651. }
  6652. } else {
  6653. printf("aaaa src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]);
  6654. printf("aaaa src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]);
  6655. printf("aaaa dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  6656. }
  6657. #endif
  6658. }
  6659. // ggml_compute_forward_scale
  6660. static void ggml_compute_forward_scale_f32(
  6661. const struct ggml_compute_params * params,
  6662. const struct ggml_tensor * src0,
  6663. const struct ggml_tensor * src1,
  6664. struct ggml_tensor * dst) {
  6665. GGML_ASSERT(ggml_is_contiguous(src0));
  6666. GGML_ASSERT(ggml_is_contiguous(dst));
  6667. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6668. GGML_ASSERT(ggml_is_scalar(src1));
  6669. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6670. return;
  6671. }
  6672. // scale factor
  6673. const float v = *(float *) src1->data;
  6674. const int ith = params->ith;
  6675. const int nth = params->nth;
  6676. const int nc = src0->ne[0];
  6677. const int nr = ggml_nrows(src0);
  6678. // rows per thread
  6679. const int dr = (nr + nth - 1)/nth;
  6680. // row range for this thread
  6681. const int ir0 = dr*ith;
  6682. const int ir1 = MIN(ir0 + dr, nr);
  6683. for (int i1 = ir0; i1 < ir1; i1++) {
  6684. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v);
  6685. }
  6686. }
  6687. static void ggml_compute_forward_scale(
  6688. const struct ggml_compute_params * params,
  6689. const struct ggml_tensor * src0,
  6690. const struct ggml_tensor * src1,
  6691. struct ggml_tensor * dst) {
  6692. switch (src0->type) {
  6693. case GGML_TYPE_F32:
  6694. {
  6695. ggml_compute_forward_scale_f32(params, src0, src1, dst);
  6696. } break;
  6697. default:
  6698. {
  6699. GGML_ASSERT(false);
  6700. } break;
  6701. }
  6702. }
  6703. // ggml_compute_forward_cpy
  6704. static void ggml_compute_forward_cpy(
  6705. const struct ggml_compute_params * params,
  6706. const struct ggml_tensor * src0,
  6707. struct ggml_tensor * dst) {
  6708. ggml_compute_forward_dup(params, src0, dst);
  6709. }
  6710. // ggml_compute_forward_cont
  6711. static void ggml_compute_forward_cont(
  6712. const struct ggml_compute_params * params,
  6713. const struct ggml_tensor * src0,
  6714. struct ggml_tensor * dst) {
  6715. ggml_compute_forward_dup(params, src0, dst);
  6716. }
  6717. // ggml_compute_forward_reshape
  6718. static void ggml_compute_forward_reshape(
  6719. const struct ggml_compute_params * params,
  6720. const struct ggml_tensor * src0,
  6721. struct ggml_tensor * dst) {
  6722. // NOP
  6723. UNUSED(params);
  6724. UNUSED(src0);
  6725. UNUSED(dst);
  6726. }
  6727. // ggml_compute_forward_view
  6728. static void ggml_compute_forward_view(
  6729. const struct ggml_compute_params * params,
  6730. const struct ggml_tensor * src0) {
  6731. // NOP
  6732. UNUSED(params);
  6733. UNUSED(src0);
  6734. }
  6735. // ggml_compute_forward_permute
  6736. static void ggml_compute_forward_permute(
  6737. const struct ggml_compute_params * params,
  6738. const struct ggml_tensor * src0) {
  6739. // NOP
  6740. UNUSED(params);
  6741. UNUSED(src0);
  6742. }
  6743. // ggml_compute_forward_transpose
  6744. static void ggml_compute_forward_transpose(
  6745. const struct ggml_compute_params * params,
  6746. const struct ggml_tensor * src0) {
  6747. // NOP
  6748. UNUSED(params);
  6749. UNUSED(src0);
  6750. }
  6751. // ggml_compute_forward_get_rows
  6752. static void ggml_compute_forward_get_rows_q(
  6753. const struct ggml_compute_params * params,
  6754. const struct ggml_tensor * src0,
  6755. const struct ggml_tensor * src1,
  6756. struct ggml_tensor * dst) {
  6757. assert(params->ith == 0);
  6758. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6759. return;
  6760. }
  6761. const int nc = src0->ne[0];
  6762. const int nr = ggml_nelements(src1);
  6763. const enum ggml_type type = src0->type;
  6764. dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
  6765. assert( dst->ne[0] == nc);
  6766. assert( dst->ne[1] == nr);
  6767. assert(src0->nb[0] == GGML_TYPE_SIZE[type]);
  6768. for (int i = 0; i < nr; ++i) {
  6769. const int r = ((int32_t *) src1->data)[i];
  6770. dequantize_row_q(
  6771. (const void *) ((char *) src0->data + r*src0->nb[1]),
  6772. (float *) ((char *) dst->data + i*dst->nb[1]), nc);
  6773. }
  6774. }
  6775. static void ggml_compute_forward_get_rows_f16(
  6776. const struct ggml_compute_params * params,
  6777. const struct ggml_tensor * src0,
  6778. const struct ggml_tensor * src1,
  6779. struct ggml_tensor * dst) {
  6780. assert(params->ith == 0);
  6781. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6782. return;
  6783. }
  6784. const int nc = src0->ne[0];
  6785. const int nr = ggml_nelements(src1);
  6786. assert( dst->ne[0] == nc);
  6787. assert( dst->ne[1] == nr);
  6788. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  6789. for (int i = 0; i < nr; ++i) {
  6790. const int r = ((int32_t *) src1->data)[i];
  6791. for (int j = 0; j < nc; ++j) {
  6792. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
  6793. ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
  6794. }
  6795. }
  6796. }
  6797. static void ggml_compute_forward_get_rows_f32(
  6798. const struct ggml_compute_params * params,
  6799. const struct ggml_tensor * src0,
  6800. const struct ggml_tensor * src1,
  6801. struct ggml_tensor * dst) {
  6802. assert(params->ith == 0);
  6803. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6804. return;
  6805. }
  6806. const int nc = src0->ne[0];
  6807. const int nr = ggml_nelements(src1);
  6808. assert( dst->ne[0] == nc);
  6809. assert( dst->ne[1] == nr);
  6810. assert(src0->nb[0] == sizeof(float));
  6811. for (int i = 0; i < nr; ++i) {
  6812. const int r = ((int32_t *) src1->data)[i];
  6813. ggml_vec_cpy_f32(nc,
  6814. (float *) ((char *) dst->data + i*dst->nb[1]),
  6815. (float *) ((char *) src0->data + r*src0->nb[1]));
  6816. }
  6817. }
  6818. static void ggml_compute_forward_get_rows(
  6819. const struct ggml_compute_params * params,
  6820. const struct ggml_tensor * src0,
  6821. const struct ggml_tensor * src1,
  6822. struct ggml_tensor * dst) {
  6823. switch (src0->type) {
  6824. case GGML_TYPE_Q4_0:
  6825. case GGML_TYPE_Q4_1:
  6826. case GGML_TYPE_Q4_2:
  6827. case GGML_TYPE_Q8_0:
  6828. {
  6829. ggml_compute_forward_get_rows_q(params, src0, src1, dst);
  6830. } break;
  6831. case GGML_TYPE_F16:
  6832. {
  6833. ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
  6834. } break;
  6835. case GGML_TYPE_F32:
  6836. {
  6837. ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
  6838. } break;
  6839. default:
  6840. {
  6841. GGML_ASSERT(false);
  6842. } break;
  6843. }
  6844. //static bool first = true;
  6845. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  6846. //if (first) {
  6847. // first = false;
  6848. //} else {
  6849. // for (int k = 0; k < dst->ne[1]; ++k) {
  6850. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  6851. // for (int i = 0; i < 16; ++i) {
  6852. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  6853. // }
  6854. // printf("\n");
  6855. // }
  6856. // printf("\n");
  6857. // }
  6858. // printf("\n");
  6859. // exit(0);
  6860. //}
  6861. }
  6862. // ggml_compute_forward_diag_mask_inf
  6863. static void ggml_compute_forward_diag_mask_inf_f32(
  6864. const struct ggml_compute_params * params,
  6865. const struct ggml_tensor * src0,
  6866. const struct ggml_tensor * src1,
  6867. struct ggml_tensor * dst) {
  6868. assert(params->ith == 0);
  6869. assert(src1->type == GGML_TYPE_I32);
  6870. assert(ggml_nelements(src1) == 1);
  6871. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6872. return;
  6873. }
  6874. const int n_past = ((int32_t *) src1->data)[0];
  6875. // TODO: handle transposed/permuted matrices
  6876. const int n = ggml_nrows(src0);
  6877. const int nc = src0->ne[0];
  6878. const int nr = src0->ne[1];
  6879. const int nz = n/nr;
  6880. assert( dst->nb[0] == sizeof(float));
  6881. assert(src0->nb[0] == sizeof(float));
  6882. for (int k = 0; k < nz; k++) {
  6883. for (int j = 0; j < nr; j++) {
  6884. for (int i = n_past; i < nc; i++) {
  6885. if (i > n_past + j) {
  6886. *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = -INFINITY;
  6887. }
  6888. }
  6889. }
  6890. }
  6891. }
  6892. static void ggml_compute_forward_diag_mask_inf(
  6893. const struct ggml_compute_params * params,
  6894. const struct ggml_tensor * src0,
  6895. const struct ggml_tensor * src1,
  6896. struct ggml_tensor * dst) {
  6897. switch (src0->type) {
  6898. case GGML_TYPE_F32:
  6899. {
  6900. ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst);
  6901. } break;
  6902. default:
  6903. {
  6904. GGML_ASSERT(false);
  6905. } break;
  6906. }
  6907. }
  6908. // ggml_compute_forward_soft_max
  6909. static void ggml_compute_forward_soft_max_f32(
  6910. const struct ggml_compute_params * params,
  6911. const struct ggml_tensor * src0,
  6912. struct ggml_tensor * dst) {
  6913. GGML_ASSERT(ggml_is_contiguous(src0));
  6914. GGML_ASSERT(ggml_is_contiguous(dst));
  6915. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6916. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6917. return;
  6918. }
  6919. // TODO: handle transposed/permuted matrices
  6920. const int ith = params->ith;
  6921. const int nth = params->nth;
  6922. const int nc = src0->ne[0];
  6923. const int nr = ggml_nrows(src0);
  6924. // rows per thread
  6925. const int dr = (nr + nth - 1)/nth;
  6926. // row range for this thread
  6927. const int ir0 = dr*ith;
  6928. const int ir1 = MIN(ir0 + dr, nr);
  6929. for (int i1 = ir0; i1 < ir1; i1++) {
  6930. float *p = (float *)((char *) dst->data + i1*dst->nb[1]);
  6931. #ifndef NDEBUG
  6932. for (int i = 0; i < nc; ++i) {
  6933. //printf("p[%d] = %f\n", i, p[i]);
  6934. assert(!isnan(p[i]));
  6935. }
  6936. #endif
  6937. float max = -INFINITY;
  6938. ggml_vec_max_f32(nc, &max, p);
  6939. ggml_float sum = 0.0;
  6940. uint16_t scvt;
  6941. for (int i = 0; i < nc; i++) {
  6942. if (p[i] == -INFINITY) {
  6943. p[i] = 0.0f;
  6944. } else {
  6945. //const float val = (p[i] == -INFINITY) ? 0.0 : exp(p[i] - max);
  6946. ggml_fp16_t s = GGML_FP32_TO_FP16(p[i] - max);
  6947. memcpy(&scvt, &s, sizeof(scvt));
  6948. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
  6949. sum += (ggml_float)val;
  6950. p[i] = val;
  6951. }
  6952. }
  6953. assert(sum > 0.0);
  6954. sum = 1.0/sum;
  6955. ggml_vec_scale_f32(nc, p, sum);
  6956. #ifndef NDEBUG
  6957. for (int i = 0; i < nc; ++i) {
  6958. assert(!isnan(p[i]));
  6959. assert(!isinf(p[i]));
  6960. }
  6961. #endif
  6962. }
  6963. }
  6964. static void ggml_compute_forward_soft_max(
  6965. const struct ggml_compute_params * params,
  6966. const struct ggml_tensor * src0,
  6967. struct ggml_tensor * dst) {
  6968. switch (src0->type) {
  6969. case GGML_TYPE_F32:
  6970. {
  6971. ggml_compute_forward_soft_max_f32(params, src0, dst);
  6972. } break;
  6973. default:
  6974. {
  6975. GGML_ASSERT(false);
  6976. } break;
  6977. }
  6978. }
  6979. // ggml_compute_forward_rope
  6980. static void ggml_compute_forward_rope_f32(
  6981. const struct ggml_compute_params * params,
  6982. const struct ggml_tensor * src0,
  6983. const struct ggml_tensor * src1,
  6984. struct ggml_tensor * dst) {
  6985. assert(src1->type == GGML_TYPE_I32);
  6986. assert(ggml_nelements(src1) == 3);
  6987. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6988. return;
  6989. }
  6990. const int n_past = ((int32_t *) src1->data)[0];
  6991. const int n_dims = ((int32_t *) src1->data)[1];
  6992. const int mode = ((int32_t *) src1->data)[2];
  6993. //const int64_t ne0 = src0->ne[0];
  6994. const int64_t ne1 = src0->ne[1];
  6995. const int64_t ne2 = src0->ne[2];
  6996. const int64_t ne3 = src0->ne[3];
  6997. const int nb0 = src0->nb[0];
  6998. const int nb1 = src0->nb[1];
  6999. const int nb2 = src0->nb[2];
  7000. const int nb3 = src0->nb[3];
  7001. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  7002. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  7003. assert(nb0 == sizeof(float));
  7004. const int ith = params->ith;
  7005. const int nth = params->nth;
  7006. const int nr = ggml_nrows(src0);
  7007. // rows per thread
  7008. const int dr = (nr + nth - 1)/nth;
  7009. // row range for this thread
  7010. const int ir0 = dr*ith;
  7011. const int ir1 = MIN(ir0 + dr, nr);
  7012. // row index used to determine which thread to use
  7013. int ir = 0;
  7014. const float theta_scale = powf(10000.0, -2.0f/n_dims);
  7015. for (int64_t i3 = 0; i3 < ne3; i3++) {
  7016. for (int64_t i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
  7017. const int p = (mode == 0 ? n_past + i2 : i2);
  7018. for (int64_t i1 = 0; i1 < ne1; i1++) {
  7019. if (ir++ < ir0) continue;
  7020. if (ir > ir1) break;
  7021. float theta = (float)p;
  7022. for (int i0 = 0; i0 < n_dims; i0 += 2) {
  7023. const float cos_theta = cosf(theta);
  7024. const float sin_theta = sinf(theta);
  7025. theta *= theta_scale;
  7026. const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  7027. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  7028. const float x0 = src[0];
  7029. const float x1 = src[1];
  7030. dst_data[0] = x0*cos_theta - x1*sin_theta;
  7031. dst_data[1] = x0*sin_theta + x1*cos_theta;
  7032. }
  7033. }
  7034. }
  7035. }
  7036. }
  7037. static void ggml_compute_forward_rope_f16(
  7038. const struct ggml_compute_params * params,
  7039. const struct ggml_tensor * src0,
  7040. const struct ggml_tensor * src1,
  7041. struct ggml_tensor * dst) {
  7042. assert(src1->type == GGML_TYPE_I32);
  7043. assert(ggml_nelements(src1) == 3);
  7044. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7045. return;
  7046. }
  7047. const int n_past = ((int32_t *) src1->data)[0];
  7048. const int n_dims = ((int32_t *) src1->data)[1];
  7049. const int mode = ((int32_t *) src1->data)[2];
  7050. //const int64_t ne0 = src0->ne[0];
  7051. const int64_t ne1 = src0->ne[1];
  7052. const int64_t ne2 = src0->ne[2];
  7053. const int64_t ne3 = src0->ne[3];
  7054. const int nb0 = src0->nb[0];
  7055. const int nb1 = src0->nb[1];
  7056. const int nb2 = src0->nb[2];
  7057. const int nb3 = src0->nb[3];
  7058. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  7059. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  7060. assert(nb0 == sizeof(ggml_fp16_t));
  7061. const int ith = params->ith;
  7062. const int nth = params->nth;
  7063. const int nr = ggml_nrows(src0);
  7064. // rows per thread
  7065. const int dr = (nr + nth - 1)/nth;
  7066. // row range for this thread
  7067. const int ir0 = dr*ith;
  7068. const int ir1 = MIN(ir0 + dr, nr);
  7069. // row index used to determine which thread to use
  7070. int ir = 0;
  7071. const float theta_scale = powf(10000.0, -2.0f/n_dims);
  7072. for (int64_t i3 = 0; i3 < ne3; i3++) {
  7073. for (int64_t i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
  7074. const int p = (mode == 0 ? n_past + i2 : i2);
  7075. for (int64_t i1 = 0; i1 < ne1; i1++) {
  7076. if (ir++ < ir0) continue;
  7077. if (ir > ir1) break;
  7078. float theta = (float)p;
  7079. for (int i0 = 0; i0 < n_dims; i0 += 2) {
  7080. const float cos_theta = cosf(theta);
  7081. const float sin_theta = sinf(theta);
  7082. theta *= theta_scale;
  7083. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  7084. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  7085. const float x0 = GGML_FP16_TO_FP32(src[0]);
  7086. const float x1 = GGML_FP16_TO_FP32(src[1]);
  7087. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  7088. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  7089. }
  7090. }
  7091. }
  7092. }
  7093. }
  7094. static void ggml_compute_forward_rope(
  7095. const struct ggml_compute_params * params,
  7096. const struct ggml_tensor * src0,
  7097. const struct ggml_tensor * src1,
  7098. struct ggml_tensor * dst) {
  7099. switch (src0->type) {
  7100. case GGML_TYPE_F16:
  7101. {
  7102. ggml_compute_forward_rope_f16(params, src0, src1, dst);
  7103. } break;
  7104. case GGML_TYPE_F32:
  7105. {
  7106. ggml_compute_forward_rope_f32(params, src0, src1, dst);
  7107. } break;
  7108. default:
  7109. {
  7110. GGML_ASSERT(false);
  7111. } break;
  7112. }
  7113. }
  7114. // ggml_compute_forward_conv_1d_1s
  7115. static void ggml_compute_forward_conv_1d_1s_f16_f32(
  7116. const struct ggml_compute_params * params,
  7117. const struct ggml_tensor * src0,
  7118. const struct ggml_tensor * src1,
  7119. struct ggml_tensor * dst) {
  7120. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7121. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7122. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  7123. int64_t t0 = ggml_perf_time_us();
  7124. UNUSED(t0);
  7125. const int64_t ne00 = src0->ne[0];
  7126. const int64_t ne01 = src0->ne[1];
  7127. const int64_t ne02 = src0->ne[2];
  7128. //const int64_t ne03 = src0->ne[3];
  7129. const int64_t ne10 = src1->ne[0];
  7130. const int64_t ne11 = src1->ne[1];
  7131. //const int64_t ne12 = src1->ne[2];
  7132. //const int64_t ne13 = src1->ne[3];
  7133. //const int64_t ne0 = dst->ne[0];
  7134. //const int64_t ne1 = dst->ne[1];
  7135. //const int64_t ne2 = dst->ne[2];
  7136. //const int64_t ne3 = dst->ne[3];
  7137. //const int64_t ne = ne0*ne1*ne2*ne3;
  7138. const int nb00 = src0->nb[0];
  7139. const int nb01 = src0->nb[1];
  7140. const int nb02 = src0->nb[2];
  7141. //const int nb03 = src0->nb[3];
  7142. const int nb10 = src1->nb[0];
  7143. const int nb11 = src1->nb[1];
  7144. //const int nb12 = src1->nb[2];
  7145. //const int nb13 = src1->nb[3];
  7146. //const int nb0 = dst->nb[0];
  7147. const int nb1 = dst->nb[1];
  7148. //const int nb2 = dst->nb[2];
  7149. //const int nb3 = dst->nb[3];
  7150. const int ith = params->ith;
  7151. const int nth = params->nth;
  7152. const int nk = ne00;
  7153. const int nh = nk/2;
  7154. const int ew0 = ggml_up32(ne01);
  7155. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  7156. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7157. GGML_ASSERT(nb10 == sizeof(float));
  7158. if (params->type == GGML_TASK_INIT) {
  7159. // TODO: fix this memset (wsize is overestimated)
  7160. memset(params->wdata, 0, params->wsize);
  7161. // prepare kernel data (src0)
  7162. {
  7163. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  7164. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7165. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7166. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  7167. ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
  7168. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7169. dst_data[i00*ew0 + i01] = src[i00];
  7170. }
  7171. }
  7172. }
  7173. }
  7174. // prepare source data (src1)
  7175. {
  7176. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
  7177. for (int64_t i11 = 0; i11 < ne11; i11++) {
  7178. const float * const src = (float *)((char *) src1->data + i11*nb11);
  7179. ggml_fp16_t * dst_data = wdata;
  7180. for (int64_t i10 = 0; i10 < ne10; i10++) {
  7181. dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
  7182. }
  7183. }
  7184. }
  7185. return;
  7186. }
  7187. if (params->type == GGML_TASK_FINALIZE) {
  7188. return;
  7189. }
  7190. // total rows in dst
  7191. const int nr = ne02;
  7192. // rows per thread
  7193. const int dr = (nr + nth - 1)/nth;
  7194. // row range for this thread
  7195. const int ir0 = dr*ith;
  7196. const int ir1 = MIN(ir0 + dr, nr);
  7197. for (int i1 = ir0; i1 < ir1; i1++) {
  7198. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  7199. for (int64_t i0 = 0; i0 < ne10; ++i0) {
  7200. dst_data[i0] = 0;
  7201. for (int k = -nh; k <= nh; k++) {
  7202. float v = 0.0f;
  7203. ggml_vec_dot_f16(ew0, &v,
  7204. (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  7205. (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  7206. dst_data[i0] += v;
  7207. }
  7208. }
  7209. }
  7210. }
  7211. static void ggml_compute_forward_conv_1d_1s_f32(
  7212. const struct ggml_compute_params * params,
  7213. const struct ggml_tensor * src0,
  7214. const struct ggml_tensor * src1,
  7215. struct ggml_tensor * dst) {
  7216. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  7217. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7218. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  7219. int64_t t0 = ggml_perf_time_us();
  7220. UNUSED(t0);
  7221. const int64_t ne00 = src0->ne[0];
  7222. const int64_t ne01 = src0->ne[1];
  7223. const int64_t ne02 = src0->ne[2];
  7224. //const int64_t ne03 = src0->ne[3];
  7225. const int64_t ne10 = src1->ne[0];
  7226. const int64_t ne11 = src1->ne[1];
  7227. //const int64_t ne12 = src1->ne[2];
  7228. //const int64_t ne13 = src1->ne[3];
  7229. //const int64_t ne0 = dst->ne[0];
  7230. //const int64_t ne1 = dst->ne[1];
  7231. //const int64_t ne2 = dst->ne[2];
  7232. //const int64_t ne3 = dst->ne[3];
  7233. //const int64_t ne = ne0*ne1*ne2*ne3;
  7234. const int nb00 = src0->nb[0];
  7235. const int nb01 = src0->nb[1];
  7236. const int nb02 = src0->nb[2];
  7237. //const int nb03 = src0->nb[3];
  7238. const int nb10 = src1->nb[0];
  7239. const int nb11 = src1->nb[1];
  7240. //const int nb12 = src1->nb[2];
  7241. //const int nb13 = src1->nb[3];
  7242. //const int nb0 = dst->nb[0];
  7243. const int nb1 = dst->nb[1];
  7244. //const int nb2 = dst->nb[2];
  7245. //const int nb3 = dst->nb[3];
  7246. const int ith = params->ith;
  7247. const int nth = params->nth;
  7248. const int nk = ne00;
  7249. const int nh = nk/2;
  7250. const int ew0 = ggml_up32(ne01);
  7251. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  7252. GGML_ASSERT(nb00 == sizeof(float));
  7253. GGML_ASSERT(nb10 == sizeof(float));
  7254. if (params->type == GGML_TASK_INIT) {
  7255. // TODO: fix this memset (wsize is overestimated)
  7256. memset(params->wdata, 0, params->wsize);
  7257. // prepare kernel data (src0)
  7258. {
  7259. float * const wdata = (float *) params->wdata + 0;
  7260. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7261. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7262. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  7263. float * dst_data = wdata + i02*ew0*ne00;
  7264. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7265. dst_data[i00*ew0 + i01] = src[i00];
  7266. }
  7267. }
  7268. }
  7269. }
  7270. // prepare source data (src1)
  7271. {
  7272. float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
  7273. for (int64_t i11 = 0; i11 < ne11; i11++) {
  7274. const float * const src = (float *)((char *) src1->data + i11*nb11);
  7275. float * dst_data = wdata;
  7276. for (int64_t i10 = 0; i10 < ne10; i10++) {
  7277. dst_data[(i10 + nh)*ew0 + i11] = src[i10];
  7278. }
  7279. }
  7280. }
  7281. return;
  7282. }
  7283. if (params->type == GGML_TASK_FINALIZE) {
  7284. return;
  7285. }
  7286. // total rows in dst
  7287. const int nr = ne02;
  7288. // rows per thread
  7289. const int dr = (nr + nth - 1)/nth;
  7290. // row range for this thread
  7291. const int ir0 = dr*ith;
  7292. const int ir1 = MIN(ir0 + dr, nr);
  7293. for (int i1 = ir0; i1 < ir1; i1++) {
  7294. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  7295. for (int64_t i0 = 0; i0 < ne10; ++i0) {
  7296. dst_data[i0] = 0;
  7297. for (int k = -nh; k <= nh; k++) {
  7298. float v = 0.0f;
  7299. ggml_vec_dot_f32(ew0, &v,
  7300. (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  7301. (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  7302. dst_data[i0] += v;
  7303. }
  7304. }
  7305. }
  7306. }
  7307. static void ggml_compute_forward_conv_1d_1s(
  7308. const struct ggml_compute_params * params,
  7309. const struct ggml_tensor * src0,
  7310. const struct ggml_tensor * src1,
  7311. struct ggml_tensor * dst) {
  7312. switch (src0->type) {
  7313. case GGML_TYPE_F16:
  7314. {
  7315. ggml_compute_forward_conv_1d_1s_f16_f32(params, src0, src1, dst);
  7316. } break;
  7317. case GGML_TYPE_F32:
  7318. {
  7319. ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst);
  7320. } break;
  7321. default:
  7322. {
  7323. GGML_ASSERT(false);
  7324. } break;
  7325. }
  7326. }
  7327. // ggml_compute_forward_conv_1d_2s
  7328. static void ggml_compute_forward_conv_1d_2s_f16_f32(
  7329. const struct ggml_compute_params * params,
  7330. const struct ggml_tensor * src0,
  7331. const struct ggml_tensor * src1,
  7332. struct ggml_tensor * dst) {
  7333. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7334. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7335. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  7336. int64_t t0 = ggml_perf_time_us();
  7337. UNUSED(t0);
  7338. const int64_t ne00 = src0->ne[0];
  7339. const int64_t ne01 = src0->ne[1];
  7340. const int64_t ne02 = src0->ne[2];
  7341. //const int64_t ne03 = src0->ne[3];
  7342. const int64_t ne10 = src1->ne[0];
  7343. const int64_t ne11 = src1->ne[1];
  7344. //const int64_t ne12 = src1->ne[2];
  7345. //const int64_t ne13 = src1->ne[3];
  7346. //const int64_t ne0 = dst->ne[0];
  7347. //const int64_t ne1 = dst->ne[1];
  7348. //const int64_t ne2 = dst->ne[2];
  7349. //const int64_t ne3 = dst->ne[3];
  7350. //const int64_t ne = ne0*ne1*ne2*ne3;
  7351. const int nb00 = src0->nb[0];
  7352. const int nb01 = src0->nb[1];
  7353. const int nb02 = src0->nb[2];
  7354. //const int nb03 = src0->nb[3];
  7355. const int nb10 = src1->nb[0];
  7356. const int nb11 = src1->nb[1];
  7357. //const int nb12 = src1->nb[2];
  7358. //const int nb13 = src1->nb[3];
  7359. //const int nb0 = dst->nb[0];
  7360. const int nb1 = dst->nb[1];
  7361. //const int nb2 = dst->nb[2];
  7362. //const int nb3 = dst->nb[3];
  7363. const int ith = params->ith;
  7364. const int nth = params->nth;
  7365. const int nk = ne00;
  7366. const int nh = nk/2;
  7367. const int ew0 = ggml_up32(ne01);
  7368. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  7369. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7370. GGML_ASSERT(nb10 == sizeof(float));
  7371. if (params->type == GGML_TASK_INIT) {
  7372. // TODO: fix this memset (wsize is overestimated)
  7373. memset(params->wdata, 0, params->wsize);
  7374. // prepare kernel data (src0)
  7375. {
  7376. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  7377. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7378. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7379. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  7380. ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
  7381. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7382. dst_data[i00*ew0 + i01] = src[i00];
  7383. }
  7384. }
  7385. }
  7386. }
  7387. // prepare source data (src1)
  7388. {
  7389. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
  7390. for (int64_t i11 = 0; i11 < ne11; i11++) {
  7391. const float * const src = (float *)((char *) src1->data + i11*nb11);
  7392. ggml_fp16_t * dst_data = wdata;
  7393. for (int64_t i10 = 0; i10 < ne10; i10++) {
  7394. dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
  7395. }
  7396. }
  7397. }
  7398. return;
  7399. }
  7400. if (params->type == GGML_TASK_FINALIZE) {
  7401. return;
  7402. }
  7403. // total rows in dst
  7404. const int nr = ne02;
  7405. // rows per thread
  7406. const int dr = (nr + nth - 1)/nth;
  7407. // row range for this thread
  7408. const int ir0 = dr*ith;
  7409. const int ir1 = MIN(ir0 + dr, nr);
  7410. for (int i1 = ir0; i1 < ir1; i1++) {
  7411. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  7412. for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
  7413. dst_data[i0/2] = 0;
  7414. for (int k = -nh; k <= nh; k++) {
  7415. float v = 0.0f;
  7416. ggml_vec_dot_f16(ew0, &v,
  7417. (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  7418. (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  7419. dst_data[i0/2] += v;
  7420. }
  7421. }
  7422. }
  7423. }
  7424. static void ggml_compute_forward_conv_1d_2s_f32(
  7425. const struct ggml_compute_params * params,
  7426. const struct ggml_tensor * src0,
  7427. const struct ggml_tensor * src1,
  7428. struct ggml_tensor * dst) {
  7429. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  7430. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7431. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  7432. int64_t t0 = ggml_perf_time_us();
  7433. UNUSED(t0);
  7434. const int64_t ne00 = src0->ne[0];
  7435. const int64_t ne01 = src0->ne[1];
  7436. const int64_t ne02 = src0->ne[2];
  7437. //const int64_t ne03 = src0->ne[3];
  7438. const int64_t ne10 = src1->ne[0];
  7439. const int64_t ne11 = src1->ne[1];
  7440. //const int64_t ne12 = src1->ne[2];
  7441. //const int64_t ne13 = src1->ne[3];
  7442. //const int64_t ne0 = dst->ne[0];
  7443. //const int64_t ne1 = dst->ne[1];
  7444. //const int64_t ne2 = dst->ne[2];
  7445. //const int64_t ne3 = dst->ne[3];
  7446. //const int64_t ne = ne0*ne1*ne2*ne3;
  7447. const int nb00 = src0->nb[0];
  7448. const int nb01 = src0->nb[1];
  7449. const int nb02 = src0->nb[2];
  7450. //const int nb03 = src0->nb[3];
  7451. const int nb10 = src1->nb[0];
  7452. const int nb11 = src1->nb[1];
  7453. //const int nb12 = src1->nb[2];
  7454. //const int nb13 = src1->nb[3];
  7455. //const int nb0 = dst->nb[0];
  7456. const int nb1 = dst->nb[1];
  7457. //const int nb2 = dst->nb[2];
  7458. //const int nb3 = dst->nb[3];
  7459. const int ith = params->ith;
  7460. const int nth = params->nth;
  7461. const int nk = ne00;
  7462. const int nh = nk/2;
  7463. const int ew0 = ggml_up32(ne01);
  7464. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  7465. GGML_ASSERT(nb00 == sizeof(float));
  7466. GGML_ASSERT(nb10 == sizeof(float));
  7467. if (params->type == GGML_TASK_INIT) {
  7468. // TODO: fix this memset (wsize is overestimated)
  7469. memset(params->wdata, 0, params->wsize);
  7470. // prepare kernel data (src0)
  7471. {
  7472. float * const wdata = (float *) params->wdata + 0;
  7473. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7474. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7475. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  7476. float * dst_data = wdata + i02*ew0*ne00;
  7477. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7478. dst_data[i00*ew0 + i01] = src[i00];
  7479. }
  7480. }
  7481. }
  7482. }
  7483. // prepare source data (src1)
  7484. {
  7485. float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
  7486. for (int64_t i11 = 0; i11 < ne11; i11++) {
  7487. const float * const src = (float *)((char *) src1->data + i11*nb11);
  7488. float * dst_data = wdata;
  7489. for (int64_t i10 = 0; i10 < ne10; i10++) {
  7490. dst_data[(i10 + nh)*ew0 + i11] = src[i10];
  7491. }
  7492. }
  7493. }
  7494. return;
  7495. }
  7496. if (params->type == GGML_TASK_FINALIZE) {
  7497. return;
  7498. }
  7499. // total rows in dst
  7500. const int nr = ne02;
  7501. // rows per thread
  7502. const int dr = (nr + nth - 1)/nth;
  7503. // row range for this thread
  7504. const int ir0 = dr*ith;
  7505. const int ir1 = MIN(ir0 + dr, nr);
  7506. for (int i1 = ir0; i1 < ir1; i1++) {
  7507. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  7508. for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
  7509. dst_data[i0/2] = 0;
  7510. for (int k = -nh; k <= nh; k++) {
  7511. float v = 0.0f;
  7512. ggml_vec_dot_f32(ew0, &v,
  7513. (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  7514. (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  7515. dst_data[i0/2] += v;
  7516. }
  7517. }
  7518. }
  7519. }
  7520. static void ggml_compute_forward_conv_1d_2s(
  7521. const struct ggml_compute_params * params,
  7522. const struct ggml_tensor * src0,
  7523. const struct ggml_tensor * src1,
  7524. struct ggml_tensor * dst) {
  7525. switch (src0->type) {
  7526. case GGML_TYPE_F16:
  7527. {
  7528. ggml_compute_forward_conv_1d_2s_f16_f32(params, src0, src1, dst);
  7529. } break;
  7530. case GGML_TYPE_F32:
  7531. {
  7532. ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst);
  7533. } break;
  7534. default:
  7535. {
  7536. GGML_ASSERT(false);
  7537. } break;
  7538. }
  7539. }
  7540. // ggml_compute_forward_flash_attn
  7541. static void ggml_compute_forward_flash_attn_f32(
  7542. const struct ggml_compute_params * params,
  7543. const struct ggml_tensor * q,
  7544. const struct ggml_tensor * k,
  7545. const struct ggml_tensor * v,
  7546. const bool masked,
  7547. struct ggml_tensor * dst) {
  7548. int64_t t0 = ggml_perf_time_us();
  7549. UNUSED(t0);
  7550. const int64_t neq0 = q->ne[0];
  7551. const int64_t neq1 = q->ne[1];
  7552. const int64_t neq2 = q->ne[2];
  7553. const int64_t neq3 = q->ne[3];
  7554. const int64_t nek0 = k->ne[0];
  7555. const int64_t nek1 = k->ne[1];
  7556. //const int64_t nek2 = k->ne[2];
  7557. //const int64_t nek3 = k->ne[3];
  7558. //const int64_t nev0 = v->ne[0];
  7559. const int64_t nev1 = v->ne[1];
  7560. //const int64_t nev2 = v->ne[2];
  7561. //const int64_t nev3 = v->ne[3];
  7562. const int64_t ne0 = dst->ne[0];
  7563. const int64_t ne1 = dst->ne[1];
  7564. //const int64_t ne2 = dst->ne[2];
  7565. //const int64_t ne3 = dst->ne[3];
  7566. const int nbk0 = k->nb[0];
  7567. const int nbk1 = k->nb[1];
  7568. const int nbk2 = k->nb[2];
  7569. const int nbk3 = k->nb[3];
  7570. const int nbq0 = q->nb[0];
  7571. const int nbq1 = q->nb[1];
  7572. const int nbq2 = q->nb[2];
  7573. const int nbq3 = q->nb[3];
  7574. const int nbv0 = v->nb[0];
  7575. const int nbv1 = v->nb[1];
  7576. const int nbv2 = v->nb[2];
  7577. const int nbv3 = v->nb[3];
  7578. const int nb0 = dst->nb[0];
  7579. const int nb1 = dst->nb[1];
  7580. const int nb2 = dst->nb[2];
  7581. const int nb3 = dst->nb[3];
  7582. const int ith = params->ith;
  7583. const int nth = params->nth;
  7584. const int64_t D = neq0;
  7585. const int64_t N = neq1;
  7586. const int64_t P = nek1 - N;
  7587. const int64_t M = P + N;
  7588. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  7589. GGML_ASSERT(ne0 == D);
  7590. GGML_ASSERT(ne1 == N);
  7591. GGML_ASSERT(P >= 0);
  7592. GGML_ASSERT(nbq0 == sizeof(float));
  7593. GGML_ASSERT(nbk0 == sizeof(float));
  7594. GGML_ASSERT(nbv0 == sizeof(float));
  7595. GGML_ASSERT(neq0 == D);
  7596. GGML_ASSERT(nek0 == D);
  7597. GGML_ASSERT(nev1 == D);
  7598. GGML_ASSERT(neq1 == N);
  7599. GGML_ASSERT(nek1 == N + P);
  7600. GGML_ASSERT(nev1 == D);
  7601. // dst cannot be transposed or permuted
  7602. GGML_ASSERT(nb0 == sizeof(float));
  7603. GGML_ASSERT(nb0 <= nb1);
  7604. GGML_ASSERT(nb1 <= nb2);
  7605. GGML_ASSERT(nb2 <= nb3);
  7606. if (params->type == GGML_TASK_INIT) {
  7607. return;
  7608. }
  7609. if (params->type == GGML_TASK_FINALIZE) {
  7610. return;
  7611. }
  7612. // parallelize by q rows using ggml_vec_dot_f32
  7613. // total rows in q
  7614. const int nr = neq1*neq2*neq3;
  7615. // rows per thread
  7616. const int dr = (nr + nth - 1)/nth;
  7617. // row range for this thread
  7618. const int ir0 = dr*ith;
  7619. const int ir1 = MIN(ir0 + dr, nr);
  7620. const float scale = 1.0f/sqrtf(D);
  7621. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  7622. for (int ir = ir0; ir < ir1; ++ir) {
  7623. // q indices
  7624. const int iq3 = ir/(neq2*neq1);
  7625. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  7626. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  7627. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  7628. for (int i = M; i < Mup; ++i) {
  7629. S[i] = -INFINITY;
  7630. }
  7631. for (int64_t ic = 0; ic < nek1; ++ic) {
  7632. // k indices
  7633. const int ik3 = iq3;
  7634. const int ik2 = iq2;
  7635. const int ik1 = ic;
  7636. // S indices
  7637. const int i1 = ik1;
  7638. ggml_vec_dot_f32(neq0,
  7639. S + i1,
  7640. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  7641. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  7642. }
  7643. // scale
  7644. ggml_vec_scale_f32(nek1, S, scale);
  7645. if (masked) {
  7646. for (int64_t i = P; i < M; i++) {
  7647. if (i > P + iq1) {
  7648. S[i] = -INFINITY;
  7649. }
  7650. }
  7651. }
  7652. // softmax
  7653. {
  7654. float max = -INFINITY;
  7655. ggml_vec_max_f32(M, &max, S);
  7656. ggml_float sum = 0.0;
  7657. {
  7658. #ifdef GGML_SOFT_MAX_ACCELERATE
  7659. max = -max;
  7660. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  7661. vvexpf(S, S, &Mup);
  7662. ggml_vec_sum_f32(Mup, &sum, S);
  7663. #else
  7664. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  7665. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  7666. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  7667. float * SS = S + i;
  7668. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  7669. if (SS[j] == -INFINITY) {
  7670. SS[j] = 0.0f;
  7671. } else {
  7672. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  7673. memcpy(&scvt[j], &s, sizeof(uint16_t));
  7674. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  7675. sump[j] += (ggml_float)val;
  7676. SS[j] = val;
  7677. }
  7678. }
  7679. }
  7680. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  7681. sum += sump[i];
  7682. }
  7683. #endif
  7684. }
  7685. assert(sum > 0.0);
  7686. sum = 1.0/sum;
  7687. ggml_vec_scale_f32(M, S, sum);
  7688. #ifndef NDEBUG
  7689. for (int i = 0; i < M; ++i) {
  7690. assert(!isnan(S[i]));
  7691. assert(!isinf(S[i]));
  7692. }
  7693. #endif
  7694. }
  7695. for (int64_t ic = 0; ic < nev1; ++ic) {
  7696. // dst indices
  7697. const int i1 = iq1;
  7698. const int i2 = iq2;
  7699. const int i3 = iq3;
  7700. ggml_vec_dot_f32(nek1,
  7701. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  7702. (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  7703. S);
  7704. }
  7705. }
  7706. }
  7707. static void ggml_compute_forward_flash_attn_f16(
  7708. const struct ggml_compute_params * params,
  7709. const struct ggml_tensor * q,
  7710. const struct ggml_tensor * k,
  7711. const struct ggml_tensor * v,
  7712. const bool masked,
  7713. struct ggml_tensor * dst) {
  7714. int64_t t0 = ggml_perf_time_us();
  7715. UNUSED(t0);
  7716. const int64_t neq0 = q->ne[0];
  7717. const int64_t neq1 = q->ne[1];
  7718. const int64_t neq2 = q->ne[2];
  7719. const int64_t neq3 = q->ne[3];
  7720. const int64_t nek0 = k->ne[0];
  7721. const int64_t nek1 = k->ne[1];
  7722. //const int64_t nek2 = k->ne[2];
  7723. //const int64_t nek3 = k->ne[3];
  7724. //const int64_t nev0 = v->ne[0];
  7725. const int64_t nev1 = v->ne[1];
  7726. //const int64_t nev2 = v->ne[2];
  7727. //const int64_t nev3 = v->ne[3];
  7728. const int64_t ne0 = dst->ne[0];
  7729. const int64_t ne1 = dst->ne[1];
  7730. //const int64_t ne2 = dst->ne[2];
  7731. //const int64_t ne3 = dst->ne[3];
  7732. const int nbk0 = k->nb[0];
  7733. const int nbk1 = k->nb[1];
  7734. const int nbk2 = k->nb[2];
  7735. const int nbk3 = k->nb[3];
  7736. const int nbq0 = q->nb[0];
  7737. const int nbq1 = q->nb[1];
  7738. const int nbq2 = q->nb[2];
  7739. const int nbq3 = q->nb[3];
  7740. const int nbv0 = v->nb[0];
  7741. const int nbv1 = v->nb[1];
  7742. const int nbv2 = v->nb[2];
  7743. const int nbv3 = v->nb[3];
  7744. const int nb0 = dst->nb[0];
  7745. const int nb1 = dst->nb[1];
  7746. const int nb2 = dst->nb[2];
  7747. const int nb3 = dst->nb[3];
  7748. const int ith = params->ith;
  7749. const int nth = params->nth;
  7750. const int64_t D = neq0;
  7751. const int64_t N = neq1;
  7752. const int64_t P = nek1 - N;
  7753. const int64_t M = P + N;
  7754. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  7755. GGML_ASSERT(ne0 == D);
  7756. GGML_ASSERT(ne1 == N);
  7757. GGML_ASSERT(P >= 0);
  7758. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  7759. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  7760. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  7761. GGML_ASSERT(neq0 == D);
  7762. GGML_ASSERT(nek0 == D);
  7763. GGML_ASSERT(nev1 == D);
  7764. GGML_ASSERT(neq1 == N);
  7765. GGML_ASSERT(nek1 == N + P);
  7766. GGML_ASSERT(nev1 == D);
  7767. // dst cannot be transposed or permuted
  7768. GGML_ASSERT(nb0 == sizeof(float));
  7769. GGML_ASSERT(nb0 <= nb1);
  7770. GGML_ASSERT(nb1 <= nb2);
  7771. GGML_ASSERT(nb2 <= nb3);
  7772. if (params->type == GGML_TASK_INIT) {
  7773. return;
  7774. }
  7775. if (params->type == GGML_TASK_FINALIZE) {
  7776. return;
  7777. }
  7778. // parallelize by q rows using ggml_vec_dot_f32
  7779. // total rows in q
  7780. const int nr = neq1*neq2*neq3;
  7781. // rows per thread
  7782. const int dr = (nr + nth - 1)/nth;
  7783. // row range for this thread
  7784. const int ir0 = dr*ith;
  7785. const int ir1 = MIN(ir0 + dr, nr);
  7786. const float scale = 1.0f/sqrtf(D);
  7787. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  7788. for (int ir = ir0; ir < ir1; ++ir) {
  7789. // q indices
  7790. const int iq3 = ir/(neq2*neq1);
  7791. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  7792. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  7793. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  7794. for (int i = M; i < Mup; ++i) {
  7795. S[i] = -INFINITY;
  7796. }
  7797. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  7798. for (int64_t ic = 0; ic < nek1; ++ic) {
  7799. // k indices
  7800. const int ik3 = iq3;
  7801. const int ik2 = iq2;
  7802. const int ik1 = ic;
  7803. // S indices
  7804. const int i1 = ik1;
  7805. ggml_vec_dot_f16(neq0,
  7806. S + i1,
  7807. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  7808. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  7809. }
  7810. } else {
  7811. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  7812. // k indices
  7813. const int ik3 = iq3;
  7814. const int ik2 = iq2;
  7815. const int ik1 = ic;
  7816. // S indices
  7817. const int i1 = ik1;
  7818. ggml_vec_dot_f16_unroll(neq0, nbk1,
  7819. S + i1,
  7820. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  7821. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  7822. }
  7823. }
  7824. // scale
  7825. ggml_vec_scale_f32(nek1, S, scale);
  7826. if (masked) {
  7827. for (int64_t i = P; i < M; i++) {
  7828. if (i > P + iq1) {
  7829. S[i] = -INFINITY;
  7830. }
  7831. }
  7832. }
  7833. // softmax
  7834. {
  7835. float max = -INFINITY;
  7836. ggml_vec_max_f32(M, &max, S);
  7837. ggml_float sum = 0.0;
  7838. {
  7839. #ifdef GGML_SOFT_MAX_ACCELERATE
  7840. max = -max;
  7841. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  7842. vvexpf(S, S, &Mup);
  7843. ggml_vec_sum_f32(Mup, &sum, S);
  7844. #else
  7845. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  7846. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  7847. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  7848. float * SS = S + i;
  7849. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  7850. if (SS[j] == -INFINITY) {
  7851. SS[j] = 0.0f;
  7852. } else {
  7853. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  7854. memcpy(&scvt[j], &s, sizeof(uint16_t));
  7855. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  7856. sump[j] += (ggml_float)val;
  7857. SS[j] = val;
  7858. }
  7859. }
  7860. }
  7861. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  7862. sum += sump[i];
  7863. }
  7864. #endif
  7865. }
  7866. assert(sum > 0.0);
  7867. sum = 1.0/sum;
  7868. ggml_vec_scale_f32(M, S, sum);
  7869. #ifndef NDEBUG
  7870. for (int i = 0; i < M; ++i) {
  7871. assert(!isnan(S[i]));
  7872. assert(!isinf(S[i]));
  7873. }
  7874. #endif
  7875. }
  7876. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  7877. for (int64_t i = 0; i < M; i++) {
  7878. S16[i] = GGML_FP32_TO_FP16(S[i]);
  7879. }
  7880. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  7881. for (int64_t ic = 0; ic < nev1; ++ic) {
  7882. // dst indices
  7883. const int i1 = iq1;
  7884. const int i2 = iq2;
  7885. const int i3 = iq3;
  7886. ggml_vec_dot_f16(nek1,
  7887. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  7888. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  7889. S16);
  7890. }
  7891. } else {
  7892. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  7893. // dst indices
  7894. const int i1 = iq1;
  7895. const int i2 = iq2;
  7896. const int i3 = iq3;
  7897. ggml_vec_dot_f16_unroll(nek1, nbv1,
  7898. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  7899. ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  7900. S16);
  7901. }
  7902. }
  7903. }
  7904. }
  7905. static void ggml_compute_forward_flash_attn(
  7906. const struct ggml_compute_params * params,
  7907. const struct ggml_tensor * q,
  7908. const struct ggml_tensor * k,
  7909. const struct ggml_tensor * v,
  7910. const bool masked,
  7911. struct ggml_tensor * dst) {
  7912. switch (q->type) {
  7913. case GGML_TYPE_F16:
  7914. {
  7915. ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
  7916. } break;
  7917. case GGML_TYPE_F32:
  7918. {
  7919. ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
  7920. } break;
  7921. default:
  7922. {
  7923. GGML_ASSERT(false);
  7924. } break;
  7925. }
  7926. }
  7927. // ggml_compute_forward_flash_ff
  7928. static void ggml_compute_forward_flash_ff_f16(
  7929. const struct ggml_compute_params * params,
  7930. const struct ggml_tensor * a, // F16
  7931. const struct ggml_tensor * b0, // F16 fc_w
  7932. const struct ggml_tensor * b1, // F32 fc_b
  7933. const struct ggml_tensor * c0, // F16 proj_w
  7934. const struct ggml_tensor * c1, // F32 proj_b
  7935. struct ggml_tensor * dst) {
  7936. int64_t t0 = ggml_perf_time_us();
  7937. UNUSED(t0);
  7938. const int64_t nea0 = a->ne[0];
  7939. const int64_t nea1 = a->ne[1];
  7940. const int64_t nea2 = a->ne[2];
  7941. const int64_t nea3 = a->ne[3];
  7942. const int64_t neb00 = b0->ne[0];
  7943. const int64_t neb01 = b0->ne[1];
  7944. //const int64_t neb02 = b0->ne[2];
  7945. //const int64_t neb03 = b0->ne[3];
  7946. const int64_t neb10 = b1->ne[0];
  7947. const int64_t neb11 = b1->ne[1];
  7948. //const int64_t neb12 = b1->ne[2];
  7949. //const int64_t neb13 = b1->ne[3];
  7950. const int64_t nec00 = c0->ne[0];
  7951. const int64_t nec01 = c0->ne[1];
  7952. //const int64_t nec02 = c0->ne[2];
  7953. //const int64_t nec03 = c0->ne[3];
  7954. const int64_t nec10 = c1->ne[0];
  7955. const int64_t nec11 = c1->ne[1];
  7956. //const int64_t nec12 = c1->ne[2];
  7957. //const int64_t nec13 = c1->ne[3];
  7958. const int64_t ne0 = dst->ne[0];
  7959. const int64_t ne1 = dst->ne[1];
  7960. const int64_t ne2 = dst->ne[2];
  7961. //const int64_t ne3 = dst->ne[3];
  7962. const int nba0 = a->nb[0];
  7963. const int nba1 = a->nb[1];
  7964. const int nba2 = a->nb[2];
  7965. const int nba3 = a->nb[3];
  7966. const int nbb00 = b0->nb[0];
  7967. const int nbb01 = b0->nb[1];
  7968. const int nbb02 = b0->nb[2];
  7969. const int nbb03 = b0->nb[3];
  7970. const int nbb10 = b1->nb[0];
  7971. //const int nbb11 = b1->nb[1];
  7972. //const int nbb12 = b1->nb[2];
  7973. //const int nbb13 = b1->nb[3];
  7974. const int nbc00 = c0->nb[0];
  7975. const int nbc01 = c0->nb[1];
  7976. const int nbc02 = c0->nb[2];
  7977. const int nbc03 = c0->nb[3];
  7978. const int nbc10 = c1->nb[0];
  7979. //const int nbc11 = c1->nb[1];
  7980. //const int nbc12 = c1->nb[2];
  7981. //const int nbc13 = c1->nb[3];
  7982. const int nb0 = dst->nb[0];
  7983. const int nb1 = dst->nb[1];
  7984. const int nb2 = dst->nb[2];
  7985. const int nb3 = dst->nb[3];
  7986. const int ith = params->ith;
  7987. const int nth = params->nth;
  7988. const int64_t D = nea0;
  7989. //const int64_t N = nea1;
  7990. const int64_t M = neb01;
  7991. GGML_ASSERT(ne0 == nea0);
  7992. GGML_ASSERT(ne1 == nea1);
  7993. GGML_ASSERT(ne2 == nea2);
  7994. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  7995. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  7996. GGML_ASSERT(nbb10 == sizeof(float));
  7997. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  7998. GGML_ASSERT(nbc10 == sizeof(float));
  7999. GGML_ASSERT(neb00 == D);
  8000. GGML_ASSERT(neb01 == M);
  8001. GGML_ASSERT(neb10 == M);
  8002. GGML_ASSERT(neb11 == 1);
  8003. GGML_ASSERT(nec00 == M);
  8004. GGML_ASSERT(nec01 == D);
  8005. GGML_ASSERT(nec10 == D);
  8006. GGML_ASSERT(nec11 == 1);
  8007. // dst cannot be transposed or permuted
  8008. GGML_ASSERT(nb0 == sizeof(float));
  8009. GGML_ASSERT(nb0 <= nb1);
  8010. GGML_ASSERT(nb1 <= nb2);
  8011. GGML_ASSERT(nb2 <= nb3);
  8012. if (params->type == GGML_TASK_INIT) {
  8013. return;
  8014. }
  8015. if (params->type == GGML_TASK_FINALIZE) {
  8016. return;
  8017. }
  8018. // parallelize by a rows using ggml_vec_dot_f32
  8019. // total rows in a
  8020. const int nr = nea1*nea2*nea3;
  8021. // rows per thread
  8022. const int dr = (nr + nth - 1)/nth;
  8023. // row range for this thread
  8024. const int ir0 = dr*ith;
  8025. const int ir1 = MIN(ir0 + dr, nr);
  8026. for (int ir = ir0; ir < ir1; ++ir) {
  8027. // a indices
  8028. const int ia3 = ir/(nea2*nea1);
  8029. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  8030. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  8031. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  8032. for (int64_t ic = 0; ic < neb01; ++ic) {
  8033. // b0 indices
  8034. const int ib03 = ia3;
  8035. const int ib02 = ia2;
  8036. const int ib01 = ic;
  8037. // S indices
  8038. const int i1 = ib01;
  8039. ggml_vec_dot_f16(nea0,
  8040. S + i1,
  8041. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
  8042. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)));
  8043. }
  8044. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  8045. //ggml_vec_gelu_f32(neb01, S, S);
  8046. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  8047. for (int64_t i = 0; i < M; i++) {
  8048. S16[i] = GGML_FP32_TO_FP16(S[i]);
  8049. }
  8050. ggml_vec_gelu_f16(neb01, S16, S16);
  8051. {
  8052. // dst indices
  8053. const int i1 = ia1;
  8054. const int i2 = ia2;
  8055. const int i3 = ia3;
  8056. for (int64_t ic = 0; ic < nec01; ++ic) {
  8057. ggml_vec_dot_f16(neb01,
  8058. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  8059. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)),
  8060. S16);
  8061. }
  8062. ggml_vec_add_f32(nec01,
  8063. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  8064. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  8065. (float *) c1->data);
  8066. }
  8067. }
  8068. }
  8069. static void ggml_compute_forward_flash_ff(
  8070. const struct ggml_compute_params * params,
  8071. const struct ggml_tensor * a,
  8072. const struct ggml_tensor * b0,
  8073. const struct ggml_tensor * b1,
  8074. const struct ggml_tensor * c0,
  8075. const struct ggml_tensor * c1,
  8076. struct ggml_tensor * dst) {
  8077. switch (b0->type) {
  8078. case GGML_TYPE_F16:
  8079. {
  8080. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  8081. } break;
  8082. case GGML_TYPE_F32:
  8083. {
  8084. GGML_ASSERT(false); // TODO
  8085. } break;
  8086. default:
  8087. {
  8088. GGML_ASSERT(false);
  8089. } break;
  8090. }
  8091. }
  8092. // ggml_compute_forward_map_unary
  8093. static void ggml_compute_forward_map_unary_f32(
  8094. const struct ggml_compute_params * params,
  8095. const struct ggml_tensor * src0,
  8096. struct ggml_tensor * dst,
  8097. const ggml_unary_op_f32_t fun) {
  8098. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8099. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8100. return;
  8101. }
  8102. const int n = ggml_nrows(src0);
  8103. const int nc = src0->ne[0];
  8104. assert( dst->nb[0] == sizeof(float));
  8105. assert(src0->nb[0] == sizeof(float));
  8106. for (int i = 0; i < n; i++) {
  8107. fun(nc,
  8108. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8109. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8110. }
  8111. }
  8112. static void ggml_compute_forward_map_unary(
  8113. const struct ggml_compute_params * params,
  8114. const struct ggml_tensor * src0,
  8115. struct ggml_tensor * dst,
  8116. const ggml_unary_op_f32_t fun) {
  8117. switch (src0->type) {
  8118. case GGML_TYPE_F32:
  8119. {
  8120. ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
  8121. } break;
  8122. default:
  8123. {
  8124. GGML_ASSERT(false);
  8125. } break;
  8126. }
  8127. }
  8128. // ggml_compute_forward_map_binary
  8129. static void ggml_compute_forward_map_binary_f32(
  8130. const struct ggml_compute_params * params,
  8131. const struct ggml_tensor * src0,
  8132. const struct ggml_tensor * src1,
  8133. struct ggml_tensor * dst,
  8134. const ggml_binary_op_f32_t fun) {
  8135. assert(params->ith == 0);
  8136. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  8137. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8138. return;
  8139. }
  8140. const int n = ggml_nrows(src0);
  8141. const int nc = src0->ne[0];
  8142. assert( dst->nb[0] == sizeof(float));
  8143. assert(src0->nb[0] == sizeof(float));
  8144. assert(src1->nb[0] == sizeof(float));
  8145. for (int i = 0; i < n; i++) {
  8146. fun(nc,
  8147. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8148. (float *) ((char *) src0->data + i*(src0->nb[1])),
  8149. (float *) ((char *) src1->data + i*(src1->nb[1])));
  8150. }
  8151. }
  8152. static void ggml_compute_forward_map_binary(
  8153. const struct ggml_compute_params * params,
  8154. const struct ggml_tensor * src0,
  8155. const struct ggml_tensor * src1,
  8156. struct ggml_tensor * dst,
  8157. const ggml_binary_op_f32_t fun) {
  8158. switch (src0->type) {
  8159. case GGML_TYPE_F32:
  8160. {
  8161. ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
  8162. } break;
  8163. default:
  8164. {
  8165. GGML_ASSERT(false);
  8166. } break;
  8167. }
  8168. }
  8169. /////////////////////////////////
  8170. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  8171. GGML_ASSERT(params);
  8172. switch (tensor->op) {
  8173. case GGML_OP_DUP:
  8174. {
  8175. ggml_compute_forward_dup(params, tensor->src0, tensor);
  8176. } break;
  8177. case GGML_OP_ADD:
  8178. {
  8179. ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor);
  8180. } break;
  8181. case GGML_OP_SUB:
  8182. {
  8183. ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor);
  8184. } break;
  8185. case GGML_OP_MUL:
  8186. {
  8187. ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor);
  8188. } break;
  8189. case GGML_OP_DIV:
  8190. {
  8191. ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor);
  8192. } break;
  8193. case GGML_OP_SQR:
  8194. {
  8195. ggml_compute_forward_sqr(params, tensor->src0, tensor);
  8196. } break;
  8197. case GGML_OP_SQRT:
  8198. {
  8199. ggml_compute_forward_sqrt(params, tensor->src0, tensor);
  8200. } break;
  8201. case GGML_OP_SUM:
  8202. {
  8203. ggml_compute_forward_sum(params, tensor->src0, tensor);
  8204. } break;
  8205. case GGML_OP_MEAN:
  8206. {
  8207. ggml_compute_forward_mean(params, tensor->src0, tensor);
  8208. } break;
  8209. case GGML_OP_REPEAT:
  8210. {
  8211. ggml_compute_forward_repeat(params, tensor->src0, tensor);
  8212. } break;
  8213. case GGML_OP_ABS:
  8214. {
  8215. ggml_compute_forward_abs(params, tensor->src0, tensor);
  8216. } break;
  8217. case GGML_OP_SGN:
  8218. {
  8219. ggml_compute_forward_sgn(params, tensor->src0, tensor);
  8220. } break;
  8221. case GGML_OP_NEG:
  8222. {
  8223. ggml_compute_forward_neg(params, tensor->src0, tensor);
  8224. } break;
  8225. case GGML_OP_STEP:
  8226. {
  8227. ggml_compute_forward_step(params, tensor->src0, tensor);
  8228. } break;
  8229. case GGML_OP_RELU:
  8230. {
  8231. ggml_compute_forward_relu(params, tensor->src0, tensor);
  8232. } break;
  8233. case GGML_OP_GELU:
  8234. {
  8235. ggml_compute_forward_gelu(params, tensor->src0, tensor);
  8236. } break;
  8237. case GGML_OP_SILU:
  8238. {
  8239. ggml_compute_forward_silu(params, tensor->src0, tensor);
  8240. } break;
  8241. case GGML_OP_NORM:
  8242. {
  8243. ggml_compute_forward_norm(params, tensor->src0, tensor);
  8244. } break;
  8245. case GGML_OP_RMS_NORM:
  8246. {
  8247. ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
  8248. } break;
  8249. case GGML_OP_MUL_MAT:
  8250. {
  8251. ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
  8252. } break;
  8253. case GGML_OP_SCALE:
  8254. {
  8255. ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor);
  8256. } break;
  8257. case GGML_OP_CPY:
  8258. {
  8259. ggml_compute_forward_cpy(params, tensor->src0, tensor);
  8260. } break;
  8261. case GGML_OP_CONT:
  8262. {
  8263. ggml_compute_forward_cont(params, tensor->src0, tensor);
  8264. } break;
  8265. case GGML_OP_RESHAPE:
  8266. {
  8267. ggml_compute_forward_reshape(params, tensor->src0, tensor);
  8268. } break;
  8269. case GGML_OP_VIEW:
  8270. {
  8271. ggml_compute_forward_view(params, tensor->src0);
  8272. } break;
  8273. case GGML_OP_PERMUTE:
  8274. {
  8275. ggml_compute_forward_permute(params, tensor->src0);
  8276. } break;
  8277. case GGML_OP_TRANSPOSE:
  8278. {
  8279. ggml_compute_forward_transpose(params, tensor->src0);
  8280. } break;
  8281. case GGML_OP_GET_ROWS:
  8282. {
  8283. ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor);
  8284. } break;
  8285. case GGML_OP_DIAG_MASK_INF:
  8286. {
  8287. ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor);
  8288. } break;
  8289. case GGML_OP_SOFT_MAX:
  8290. {
  8291. ggml_compute_forward_soft_max(params, tensor->src0, tensor);
  8292. } break;
  8293. case GGML_OP_ROPE:
  8294. {
  8295. ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor);
  8296. } break;
  8297. case GGML_OP_CONV_1D_1S:
  8298. {
  8299. ggml_compute_forward_conv_1d_1s(params, tensor->src0, tensor->src1, tensor);
  8300. } break;
  8301. case GGML_OP_CONV_1D_2S:
  8302. {
  8303. ggml_compute_forward_conv_1d_2s(params, tensor->src0, tensor->src1, tensor);
  8304. } break;
  8305. case GGML_OP_FLASH_ATTN:
  8306. {
  8307. int32_t t = ggml_get_i32_1d(tensor->opt[1], 0);
  8308. GGML_ASSERT(t == 0 || t == 1);
  8309. bool masked = t != 0;
  8310. ggml_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor);
  8311. } break;
  8312. case GGML_OP_FLASH_FF:
  8313. {
  8314. ggml_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor);
  8315. } break;
  8316. case GGML_OP_MAP_UNARY:
  8317. {
  8318. const ggml_unary_op_f32_t fun = *((ggml_unary_op_f32_t *)tensor->opt[0]->data);
  8319. ggml_compute_forward_map_unary(params, tensor->src0, tensor, fun);
  8320. }
  8321. break;
  8322. case GGML_OP_MAP_BINARY:
  8323. {
  8324. const ggml_binary_op_f32_t fun = *((ggml_binary_op_f32_t *)tensor->opt[0]->data);
  8325. ggml_compute_forward_map_binary(params, tensor->src0, tensor->src1, tensor, fun);
  8326. }
  8327. break;
  8328. case GGML_OP_NONE:
  8329. {
  8330. // nop
  8331. } break;
  8332. case GGML_OP_COUNT:
  8333. {
  8334. GGML_ASSERT(false);
  8335. } break;
  8336. }
  8337. }
  8338. ////////////////////////////////////////////////////////////////////////////////
  8339. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
  8340. struct ggml_tensor * src0 = tensor->src0;
  8341. struct ggml_tensor * src1 = tensor->src1;
  8342. switch (tensor->op) {
  8343. case GGML_OP_DUP:
  8344. {
  8345. if (src0->grad) {
  8346. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  8347. }
  8348. } break;
  8349. case GGML_OP_ADD:
  8350. {
  8351. if (src0->grad) {
  8352. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  8353. }
  8354. if (src1->grad) {
  8355. src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
  8356. }
  8357. } break;
  8358. case GGML_OP_SUB:
  8359. {
  8360. if (src0->grad) {
  8361. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  8362. }
  8363. if (src1->grad) {
  8364. src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace);
  8365. }
  8366. } break;
  8367. case GGML_OP_MUL:
  8368. {
  8369. if (src0->grad) {
  8370. src0->grad =
  8371. ggml_add_impl(ctx,
  8372. src0->grad,
  8373. ggml_mul(ctx, src1, tensor->grad),
  8374. inplace);
  8375. }
  8376. if (src1->grad) {
  8377. src1->grad =
  8378. ggml_add_impl(ctx,
  8379. src1->grad,
  8380. ggml_mul(ctx, src0, tensor->grad),
  8381. inplace);
  8382. }
  8383. } break;
  8384. case GGML_OP_DIV:
  8385. {
  8386. if (src0->grad) {
  8387. src0->grad =
  8388. ggml_add_impl(ctx,
  8389. src0->grad,
  8390. ggml_div(ctx, tensor->grad, src1),
  8391. inplace);
  8392. }
  8393. if (src1->grad) {
  8394. src1->grad =
  8395. ggml_sub_impl(ctx,
  8396. src1->grad,
  8397. ggml_mul(ctx,
  8398. tensor->grad,
  8399. ggml_div(ctx, tensor, src1)),
  8400. inplace);
  8401. }
  8402. } break;
  8403. case GGML_OP_SQR:
  8404. {
  8405. if (src0->grad) {
  8406. src0->grad =
  8407. ggml_add_impl(ctx,
  8408. src0->grad,
  8409. ggml_mul(ctx,
  8410. ggml_mul(ctx, src0, tensor->grad),
  8411. ggml_repeat(ctx, ggml_new_f32(ctx, 2.0f), src0)),
  8412. inplace);
  8413. }
  8414. } break;
  8415. case GGML_OP_SQRT:
  8416. {
  8417. if (src0->grad) {
  8418. src0->grad =
  8419. ggml_add_impl(ctx,
  8420. src0->grad,
  8421. ggml_div(ctx,
  8422. ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor),
  8423. tensor),
  8424. inplace);
  8425. }
  8426. } break;
  8427. case GGML_OP_SUM:
  8428. {
  8429. if (src0->grad) {
  8430. src0->grad =
  8431. ggml_add_impl(ctx,
  8432. src0->grad,
  8433. ggml_repeat(ctx, tensor->grad, src0->grad),
  8434. inplace);
  8435. }
  8436. } break;
  8437. case GGML_OP_MEAN:
  8438. {
  8439. GGML_ASSERT(false); // TODO: implement
  8440. } break;
  8441. case GGML_OP_REPEAT:
  8442. {
  8443. if (src0->grad) {
  8444. src0->grad =
  8445. ggml_add_impl(ctx,
  8446. src0->grad,
  8447. ggml_sum(ctx, tensor->grad),
  8448. inplace);
  8449. }
  8450. } break;
  8451. case GGML_OP_ABS:
  8452. {
  8453. if (src0->grad) {
  8454. src0->grad =
  8455. ggml_add_impl(ctx,
  8456. src0->grad,
  8457. ggml_mul(ctx,
  8458. ggml_sgn(ctx, src0),
  8459. tensor->grad),
  8460. inplace);
  8461. }
  8462. } break;
  8463. case GGML_OP_SGN:
  8464. {
  8465. if (src0->grad) {
  8466. // noop
  8467. }
  8468. } break;
  8469. case GGML_OP_NEG:
  8470. {
  8471. if (src0->grad) {
  8472. src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace);
  8473. }
  8474. } break;
  8475. case GGML_OP_STEP:
  8476. {
  8477. if (src0->grad) {
  8478. // noop
  8479. }
  8480. } break;
  8481. case GGML_OP_RELU:
  8482. {
  8483. if (src0->grad) {
  8484. src0->grad = ggml_sub_impl(ctx,
  8485. src0->grad,
  8486. ggml_mul(ctx,
  8487. ggml_step(ctx, src0),
  8488. tensor->grad),
  8489. inplace);
  8490. }
  8491. } break;
  8492. case GGML_OP_GELU:
  8493. {
  8494. GGML_ASSERT(false); // TODO: not implemented
  8495. } break;
  8496. case GGML_OP_SILU:
  8497. {
  8498. GGML_ASSERT(false); // TODO: not implemented
  8499. } break;
  8500. case GGML_OP_NORM:
  8501. {
  8502. GGML_ASSERT(false); // TODO: not implemented
  8503. } break;
  8504. case GGML_OP_RMS_NORM:
  8505. {
  8506. GGML_ASSERT(false); // TODO: not implemented
  8507. } break;
  8508. case GGML_OP_MUL_MAT:
  8509. {
  8510. if (src0->grad) {
  8511. // TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad);
  8512. GGML_ASSERT(false);
  8513. }
  8514. if (src1->grad) {
  8515. src1->grad =
  8516. ggml_add_impl(ctx,
  8517. src1->grad,
  8518. ggml_mul_mat(ctx,
  8519. ggml_cont(ctx, ggml_transpose(ctx, src0)),
  8520. tensor->grad),
  8521. inplace);
  8522. }
  8523. } break;
  8524. case GGML_OP_SCALE:
  8525. {
  8526. GGML_ASSERT(false); // TODO: not implemented
  8527. } break;
  8528. case GGML_OP_CPY:
  8529. {
  8530. GGML_ASSERT(false); // TODO: not implemented
  8531. } break;
  8532. case GGML_OP_CONT:
  8533. {
  8534. GGML_ASSERT(false); // TODO: not implemented
  8535. } break;
  8536. case GGML_OP_RESHAPE:
  8537. {
  8538. GGML_ASSERT(false); // TODO: not implemented
  8539. } break;
  8540. case GGML_OP_VIEW:
  8541. {
  8542. GGML_ASSERT(false); // not supported
  8543. } break;
  8544. case GGML_OP_PERMUTE:
  8545. {
  8546. GGML_ASSERT(false); // TODO: not implemented
  8547. } break;
  8548. case GGML_OP_TRANSPOSE:
  8549. {
  8550. GGML_ASSERT(false); // TODO: not implemented
  8551. } break;
  8552. case GGML_OP_GET_ROWS:
  8553. {
  8554. GGML_ASSERT(false); // TODO: not implemented
  8555. } break;
  8556. case GGML_OP_DIAG_MASK_INF:
  8557. {
  8558. GGML_ASSERT(false); // TODO: not implemented
  8559. } break;
  8560. case GGML_OP_SOFT_MAX:
  8561. {
  8562. GGML_ASSERT(false); // TODO: not implemented
  8563. } break;
  8564. case GGML_OP_ROPE:
  8565. {
  8566. GGML_ASSERT(false); // TODO: not implemented
  8567. } break;
  8568. case GGML_OP_CONV_1D_1S:
  8569. {
  8570. GGML_ASSERT(false); // TODO: not implemented
  8571. } break;
  8572. case GGML_OP_CONV_1D_2S:
  8573. {
  8574. GGML_ASSERT(false); // TODO: not implemented
  8575. } break;
  8576. case GGML_OP_FLASH_ATTN:
  8577. {
  8578. GGML_ASSERT(false); // not supported
  8579. } break;
  8580. case GGML_OP_FLASH_FF:
  8581. {
  8582. GGML_ASSERT(false); // not supported
  8583. } break;
  8584. case GGML_OP_MAP_UNARY:
  8585. case GGML_OP_MAP_BINARY:
  8586. {
  8587. GGML_ASSERT(false); // not supported
  8588. } break;
  8589. case GGML_OP_NONE:
  8590. {
  8591. // nop
  8592. } break;
  8593. case GGML_OP_COUNT:
  8594. {
  8595. GGML_ASSERT(false);
  8596. } break;
  8597. }
  8598. }
  8599. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  8600. if (node->grad == NULL) {
  8601. // this usually happens when we generate intermediate nodes from constants in the backward pass
  8602. // it can also happen during forward pass, if the user performs computations with constants
  8603. if (node->op != GGML_OP_NONE) {
  8604. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  8605. }
  8606. }
  8607. // check if already visited
  8608. for (int i = 0; i < cgraph->n_nodes; i++) {
  8609. if (cgraph->nodes[i] == node) {
  8610. return;
  8611. }
  8612. }
  8613. for (int i = 0; i < cgraph->n_leafs; i++) {
  8614. if (cgraph->leafs[i] == node) {
  8615. return;
  8616. }
  8617. }
  8618. if (node->src0) {
  8619. ggml_visit_parents(cgraph, node->src0);
  8620. }
  8621. if (node->src1) {
  8622. ggml_visit_parents(cgraph, node->src1);
  8623. }
  8624. for (int i = 0; i < GGML_MAX_OPT; ++i) {
  8625. if (node->opt[i]) {
  8626. ggml_visit_parents(cgraph, node->opt[i]);
  8627. }
  8628. }
  8629. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  8630. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  8631. GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);
  8632. cgraph->leafs[cgraph->n_leafs] = node;
  8633. cgraph->n_leafs++;
  8634. } else {
  8635. GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);
  8636. cgraph->nodes[cgraph->n_nodes] = node;
  8637. cgraph->grads[cgraph->n_nodes] = node->grad;
  8638. cgraph->n_nodes++;
  8639. }
  8640. }
  8641. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  8642. if (!expand) {
  8643. cgraph->n_nodes = 0;
  8644. cgraph->n_leafs = 0;
  8645. }
  8646. const int n0 = cgraph->n_nodes;
  8647. UNUSED(n0);
  8648. ggml_visit_parents(cgraph, tensor);
  8649. const int n_new = cgraph->n_nodes - n0;
  8650. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  8651. if (n_new > 0) {
  8652. // the last added node should always be starting point
  8653. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  8654. }
  8655. }
  8656. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  8657. ggml_build_forward_impl(cgraph, tensor, true);
  8658. }
  8659. struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
  8660. struct ggml_cgraph result = {
  8661. /*.n_nodes =*/ 0,
  8662. /*.n_leafs =*/ 0,
  8663. /*.n_threads =*/ GGML_DEFAULT_N_THREADS,
  8664. /*.work_size =*/ 0,
  8665. /*.work =*/ NULL,
  8666. /*.nodes =*/ { NULL },
  8667. /*.grads =*/ { NULL },
  8668. /*.leafs =*/ { NULL },
  8669. /*.perf_runs =*/ 0,
  8670. /*.perf_cycles =*/ 0,
  8671. /*.perf_time_us =*/ 0,
  8672. };
  8673. ggml_build_forward_impl(&result, tensor, false);
  8674. return result;
  8675. }
  8676. struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
  8677. struct ggml_cgraph result = *gf;
  8678. GGML_ASSERT(gf->n_nodes > 0);
  8679. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  8680. if (keep) {
  8681. for (int i = 0; i < gf->n_nodes; i++) {
  8682. struct ggml_tensor * node = gf->nodes[i];
  8683. if (node->grad) {
  8684. node->grad = ggml_dup_tensor(ctx, node);
  8685. gf->grads[i] = node->grad;
  8686. }
  8687. }
  8688. }
  8689. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  8690. struct ggml_tensor * node = gf->nodes[i];
  8691. // because we detached the grad nodes from the original graph, we can afford inplace operations
  8692. if (node->grad) {
  8693. ggml_compute_backward(ctx, node, keep);
  8694. }
  8695. }
  8696. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  8697. struct ggml_tensor * node = gf->nodes[i];
  8698. if (node->is_param) {
  8699. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  8700. ggml_build_forward_impl(&result, node->grad, true);
  8701. }
  8702. }
  8703. return result;
  8704. }
  8705. //
  8706. // thread data
  8707. //
  8708. // synchronization is done via busy loops
  8709. // I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops
  8710. //
  8711. #ifdef __APPLE__
  8712. //#include <os/lock.h>
  8713. //
  8714. //typedef os_unfair_lock ggml_lock_t;
  8715. //
  8716. //#define ggml_lock_init(x) UNUSED(x)
  8717. //#define ggml_lock_destroy(x) UNUSED(x)
  8718. //#define ggml_lock_lock os_unfair_lock_lock
  8719. //#define ggml_lock_unlock os_unfair_lock_unlock
  8720. //
  8721. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  8722. typedef int ggml_lock_t;
  8723. #define ggml_lock_init(x) UNUSED(x)
  8724. #define ggml_lock_destroy(x) UNUSED(x)
  8725. #define ggml_lock_lock(x) UNUSED(x)
  8726. #define ggml_lock_unlock(x) UNUSED(x)
  8727. #define GGML_LOCK_INITIALIZER 0
  8728. typedef pthread_t ggml_thread_t;
  8729. #define ggml_thread_create pthread_create
  8730. #define ggml_thread_join pthread_join
  8731. #else
  8732. //typedef pthread_spinlock_t ggml_lock_t;
  8733. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  8734. //#define ggml_lock_destroy pthread_spin_destroy
  8735. //#define ggml_lock_lock pthread_spin_lock
  8736. //#define ggml_lock_unlock pthread_spin_unlock
  8737. typedef int ggml_lock_t;
  8738. #define ggml_lock_init(x) UNUSED(x)
  8739. #define ggml_lock_destroy(x) UNUSED(x)
  8740. #define ggml_lock_lock(x) UNUSED(x)
  8741. #define ggml_lock_unlock(x) UNUSED(x)
  8742. #define GGML_LOCK_INITIALIZER 0
  8743. typedef pthread_t ggml_thread_t;
  8744. #define ggml_thread_create pthread_create
  8745. #define ggml_thread_join pthread_join
  8746. #endif
  8747. struct ggml_compute_state_shared {
  8748. ggml_lock_t spin;
  8749. int n_threads;
  8750. // synchronization primitives
  8751. atomic_int n_ready;
  8752. atomic_bool has_work;
  8753. atomic_bool stop; // stop all threads
  8754. };
  8755. struct ggml_compute_state {
  8756. ggml_thread_t thrd;
  8757. struct ggml_compute_params params;
  8758. struct ggml_tensor * node;
  8759. struct ggml_compute_state_shared * shared;
  8760. };
  8761. static thread_ret_t ggml_graph_compute_thread(void * data) {
  8762. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  8763. const int n_threads = state->shared->n_threads;
  8764. while (true) {
  8765. if (atomic_fetch_add(&state->shared->n_ready, 1) == n_threads - 1) {
  8766. atomic_store(&state->shared->has_work, false);
  8767. } else {
  8768. while (atomic_load(&state->shared->has_work)) {
  8769. if (atomic_load(&state->shared->stop)) {
  8770. return 0;
  8771. }
  8772. ggml_lock_lock (&state->shared->spin);
  8773. ggml_lock_unlock(&state->shared->spin);
  8774. }
  8775. }
  8776. atomic_fetch_sub(&state->shared->n_ready, 1);
  8777. // wait for work
  8778. while (!atomic_load(&state->shared->has_work)) {
  8779. if (atomic_load(&state->shared->stop)) {
  8780. return 0;
  8781. }
  8782. ggml_lock_lock (&state->shared->spin);
  8783. ggml_lock_unlock(&state->shared->spin);
  8784. }
  8785. // check if we should stop
  8786. if (atomic_load(&state->shared->stop)) {
  8787. break;
  8788. }
  8789. if (state->node) {
  8790. if (state->params.ith < state->params.nth) {
  8791. ggml_compute_forward(&state->params, state->node);
  8792. }
  8793. state->node = NULL;
  8794. } else {
  8795. break;
  8796. }
  8797. }
  8798. return 0;
  8799. }
  8800. void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
  8801. const int n_threads = cgraph->n_threads;
  8802. struct ggml_compute_state_shared state_shared = {
  8803. /*.spin =*/ GGML_LOCK_INITIALIZER,
  8804. /*.n_threads =*/ n_threads,
  8805. /*.n_ready =*/ 0,
  8806. /*.has_work =*/ false,
  8807. /*.stop =*/ false,
  8808. };
  8809. struct ggml_compute_state * workers = n_threads > 1 ? alloca(sizeof(struct ggml_compute_state)*(n_threads - 1)) : NULL;
  8810. // create thread pool
  8811. if (n_threads > 1) {
  8812. ggml_lock_init(&state_shared.spin);
  8813. atomic_store(&state_shared.has_work, true);
  8814. for (int j = 0; j < n_threads - 1; j++) {
  8815. workers[j] = (struct ggml_compute_state) {
  8816. .thrd = 0,
  8817. .params = {
  8818. .type = GGML_TASK_COMPUTE,
  8819. .ith = j + 1,
  8820. .nth = n_threads,
  8821. .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
  8822. .wdata = cgraph->work ? cgraph->work->data : NULL,
  8823. },
  8824. .node = NULL,
  8825. .shared = &state_shared,
  8826. };
  8827. int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  8828. GGML_ASSERT(rc == 0);
  8829. UNUSED(rc);
  8830. }
  8831. }

    // initialize tasks + work buffer
    {
        size_t work_size = 0;

        // thread scheduling for the different operations
        for (int i = 0; i < cgraph->n_nodes; i++) {
            struct ggml_tensor * node = cgraph->nodes[i];

            switch (node->op) {
                case GGML_OP_CPY:
                case GGML_OP_DUP:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;
                        if (ggml_is_quantized(node->type)) {
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->ne[0] * n_threads;
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_ADD:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        if (ggml_is_quantized(node->src0->type)) {
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src0->ne[0] * n_threads;
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_SUB:
                case GGML_OP_MUL:
                case GGML_OP_DIV:
                case GGML_OP_SQR:
                case GGML_OP_SQRT:
                case GGML_OP_SUM:
                case GGML_OP_MEAN:
                case GGML_OP_REPEAT:
                case GGML_OP_ABS:
                case GGML_OP_SGN:
                case GGML_OP_NEG:
                case GGML_OP_STEP:
                case GGML_OP_RELU:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_GELU:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_SILU:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_NORM:
                case GGML_OP_RMS_NORM:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_MUL_MAT:
                    {
                        node->n_tasks = n_threads;

                        // TODO: use different scheduling for different matrix sizes
                        //const int nr0 = ggml_nrows(node->src0);
                        //const int nr1 = ggml_nrows(node->src1);

                        //node->n_tasks = MIN(n_threads, MAX(1, nr0/128));
                        //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, node->n_tasks);

                        size_t cur = 0;

                        if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1; // TODO: this actually is doing nothing
                                                   //       the threads are still spinning
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
                                //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]);
                                //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]);
                                //printf("cur = %zu\n", cur);
                            } else {
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
                            }
#else
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
#endif
                        } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) {
                            cur = 0;
                        } else if (quantize_fns[node->src0->type].vec_dot_q && node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1;
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
                            } else
#endif
                            {
                                cur = GGML_TYPE_SIZE[GGML_TYPE_Q8_0]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[GGML_TYPE_Q8_0];
                            }
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_SCALE:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_CONT:
                case GGML_OP_RESHAPE:
                case GGML_OP_VIEW:
                case GGML_OP_PERMUTE:
                case GGML_OP_TRANSPOSE:
                case GGML_OP_GET_ROWS:
                case GGML_OP_DIAG_MASK_INF:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_SOFT_MAX:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_ROPE:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_CONV_1D_1S:
                case GGML_OP_CONV_1D_2S:
                    {
                        node->n_tasks = n_threads;

                        GGML_ASSERT(node->src0->ne[3] == 1);
                        GGML_ASSERT(node->src1->ne[2] == 1);
                        GGML_ASSERT(node->src1->ne[3] == 1);

                        size_t cur = 0;
                        const int nk = node->src0->ne[0];

                        if (node->src0->type == GGML_TYPE_F16 &&
                            node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(ggml_fp16_t)*(
                                    nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
                                    ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
                                    );
                        } else if (node->src0->type == GGML_TYPE_F32 &&
                                   node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(float)*(
                                    nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
                                    ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
                                    );
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_FLASH_ATTN:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL);

                        if (node->src1->type == GGML_TYPE_F32) {
                            cur  = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2
                        }

                        if (node->src1->type == GGML_TYPE_F16) {
                            cur  = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_FLASH_FF:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        if (node->src1->type == GGML_TYPE_F32) {
                            cur  = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2
                        }

                        if (node->src1->type == GGML_TYPE_F16) {
                            cur  = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_MAP_UNARY:
                case GGML_OP_MAP_BINARY:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_NONE:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_COUNT:
                    {
                        GGML_ASSERT(false);
                    } break;
            }
        }

        if (cgraph->work != NULL && work_size > cgraph->work_size) {
            GGML_ASSERT(false); // TODO: better handling
        }

        if (work_size > 0 && cgraph->work == NULL) {
            cgraph->work_size = work_size + CACHE_LINE_SIZE*(n_threads - 1);

            GGML_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size);
            cgraph->work = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cgraph->work_size);
        }
    }
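
    // main evaluation loop: for each node the main thread runs INIT, then (when
    // n_tasks > 1) publishes COMPUTE/FINALIZE params to the workers through the
    // n_ready / has_work handshake, does its own share, and waits for the pool to drain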
    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    for (int i = 0; i < cgraph->n_nodes; i++) {
        GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, i, cgraph->n_nodes);

        struct ggml_tensor * node = cgraph->nodes[i];

        // TODO: this could be used to avoid unnecessary computations, but it needs to be improved
        //if (node->grad == NULL && node->perf_runs > 0) {
        //    continue;
        //}

        const int64_t perf_node_start_cycles  = ggml_perf_cycles();
        const int64_t perf_node_start_time_us = ggml_perf_time_us();

        // INIT
        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_INIT,
            /*.ith   =*/ 0,
            /*.nth   =*/ node->n_tasks,
            /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0,
            /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL,
        };

        ggml_compute_forward(&params, node);

        // COMPUTE
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            // launch thread pool
            for (int j = 0; j < n_threads - 1; j++) {
                workers[j].params = (struct ggml_compute_params) {
                    .type  = GGML_TASK_COMPUTE,
                    .ith   = j + 1,
                    .nth   = node->n_tasks,
                    .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
                    .wdata = cgraph->work ? cgraph->work->data : NULL,
                };
                workers[j].node = node;
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) > 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_store(&state_shared.has_work, true);
        }

        params.type = GGML_TASK_COMPUTE;
        ggml_compute_forward(&params, node);

        // wait for thread pool
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) != 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }
        }

        // FINALIZE
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            // launch thread pool
            for (int j = 0; j < n_threads - 1; j++) {
                workers[j].params = (struct ggml_compute_params) {
                    .type  = GGML_TASK_FINALIZE,
                    .ith   = j + 1,
                    .nth   = node->n_tasks,
                    .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
                    .wdata = cgraph->work ? cgraph->work->data : NULL,
                };
                workers[j].node = node;
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) > 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_store(&state_shared.has_work, true);
        }

        params.type = GGML_TASK_FINALIZE;
        ggml_compute_forward(&params, node);

        // wait for thread pool
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) != 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }
        }

        // performance stats (node)
        {
            int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_node_start_cycles;
            int64_t perf_time_us_cur = ggml_perf_time_us() - perf_node_start_time_us;

            node->perf_runs++;
            node->perf_cycles  += perf_cycles_cur;
            node->perf_time_us += perf_time_us_cur;
        }
    }

    // join thread pool
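    // (raising stop together with has_work releases any worker still spinning in its
    //  wait-for-work loop so the ggml_thread_join() calls below can complete)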
    if (n_threads > 1) {
        atomic_store(&state_shared.stop, true);
        atomic_store(&state_shared.has_work, true);

        for (int j = 0; j < n_threads - 1; j++) {
            int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
            UNUSED(rc);
        }

        ggml_lock_destroy(&state_shared.spin);
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }
}

void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}

void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads);
    GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size);

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += node->perf_time_us;

        GGML_PRINT(" - %3d: [ %" PRId64 ", %" PRId64 ", %" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                GGML_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %" PRId64 ", %" PRId64 "] %8s\n",
                i,
                node->ne[0], node->ne[1],
                GGML_OP_LABEL[node->op]);
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_OP_LABEL[i], (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}

// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}
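
// dump the graph gb in GraphViz dot format; nodes are colored yellow if they are
// parameters, green if they also appear in the forward graph gf, lightblue if they
// only have a gradient, white otherwise; leaf tensors are drawn in pink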
void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, " newrank = true;\n");
    fprintf(fp, " rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->is_param) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, " \"%p\" [ \
style = filled; fillcolor = %s; shape = record; \
label=\"%d [%" PRId64 ", %" PRId64 "] | <x>%s",
                (void *) node, color,
                i, node->ne[0], node->ne[1],
                GGML_OP_SYMBOL[node->op]);

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]);
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        if (ggml_nelements(node) == 1) {
            fprintf(fp, " \"%p\" [ \
style = filled; fillcolor = %s; shape = record; \
label=\"<x>%.1e\"; ]\n",
                    (void *) node, color, (double)ggml_get_f32_1d(node, 0));
        } else {
            fprintf(fp, " \"%p\" [ \
style = filled; fillcolor = %s; shape = record; \
label=\"<x>CONST %d [%" PRId64 ", %" PRId64 "]\"; ]\n",
                    (void *) node, color,
                    i, node->ne[0], node->ne[1]);
        }
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        struct ggml_tensor * parent = ggml_graph_get_parent(gb, node);

        if (node->src0) {
            struct ggml_tensor * parent0 = ggml_graph_get_parent(gb, node->src0);

            fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"x\"; ]\n",
                    parent0 ? (void *) parent0 : (void *) node->src0,
                    parent0 ? "g" : "x",
                    parent ? (void *) parent : (void *) node,
                    parent ? "g" : "x",
                    parent ? "empty" : "vee",
                    parent ? "dashed" : "solid");
        }

        if (node->src1) {
            struct ggml_tensor * parent1 = ggml_graph_get_parent(gb, node->src1);

            fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"y\"; ]\n",
                    parent1 ? (void *) parent1 : (void *) node->src1,
                    parent1 ? "g" : "x",
                    parent ? (void *) parent : (void *) node,
                    parent ? "g" : "x",
                    parent ? "empty" : "vee",
                    parent ? "dashed" : "solid");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        if (node->src0) {
            fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"x\"; ]\n",
                    (void *) node->src0, "x",
                    (void *) node, "x");
        }

        if (node->src1) {
            fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"y\"; ]\n",
                    (void *) node->src1, "x",
                    (void *) node, "x");
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
}

////////////////////////////////////////////////////////////////////////////////
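
// helpers that copy the optimizer parameters between the ggml tensors marked as
// params and a flat float array x, and gather their gradients into a flat array g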
static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}

//
// ADAM
//
// ref: https://arxiv.org/pdf/1412.6980.pdf
//
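// per-element update performed below (t is the 1-based iteration count):
//
//   m = beta1*m + (1 - beta1)*g
//   v = beta2*v + (1 - beta2)*g^2
//   x = x - alpha * (m/(1 - beta1^t)) / (sqrt(v/(1 - beta2^t)) + eps)
//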
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    GGML_ASSERT(ggml_is_scalar(f));

    gf->n_threads = params.n_threads;
    gb->n_threads = params.n_threads;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    // constants
    const float alpha = params.adam.alpha;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;

    float * x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // view of the parameters
    float * g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient
    float * g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient squared
    float * m  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment
    float * v  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment
    float * mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment hat
    float * vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment hat

    float * pf = params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values

    // initialize
    ggml_vec_set_f32(nx, m, 0.0f);
    ggml_vec_set_f32(nx, v, 0.0f);

    // update view
    ggml_opt_get_params(np, ps, x);

    // compute the function value
    ggml_graph_reset  (gf);
    ggml_set_f32      (f->grad, 1.0f);
    ggml_graph_compute(ctx, gb);

    float fx_prev = ggml_get_f32_1d(f, 0);
    if (pf) {
        pf[0] = fx_prev;
    }

    int n_no_improvement = 0;
    float fx_best = fx_prev;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            // update the gradient
            ggml_opt_get_grad(np, ps, g1);

            // m_t = beta1*m_t-1 + (1 - beta1)*g_t
            ggml_vec_scale_f32(nx, m, beta1);
            ggml_vec_mad_f32  (nx, m, g1, 1.0f - beta1);

            // g2 = g1^2
            ggml_vec_sqr_f32  (nx, g2, g1);

            // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2
            ggml_vec_scale_f32(nx, v, beta2);
            ggml_vec_mad_f32  (nx, v, g2, 1.0f - beta2);

            // m^hat = m_t / (1 - beta1^t)
            // v^hat = v_t / (1 - beta2^t)
            // x_t = x_t-1 - alpha*m^hat/(sqrt(v^hat) + eps)
            ggml_vec_cpy_f32  (nx, mh, m);
            ggml_vec_cpy_f32  (nx, vh, v);

            ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, t + 1)));
            ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, t + 1)));

            ggml_vec_sqrt_f32 (nx, vh, vh);
            ggml_vec_acc1_f32 (nx, vh, eps);

            ggml_vec_div_f32  (nx, mh, mh, vh);
            ggml_vec_sub_f32  (nx, x, x, mh);

            // update the parameters
            ggml_opt_set_params(np, ps, x);
        }

        ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(ctx, gb);

        const float fx = ggml_get_f32_1d(f, 0);

        // check convergence
        if (fabsf(fx - fx_prev)/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= t) {
                const float rate = (pf[t%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[t%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best > fx) {
                fx_best = fx;
                n_no_improvement = 0;
            } else {
                ++n_no_improvement;

                if (n_no_improvement >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        fx_prev = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}

//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
// https://github.com/chokkan/liblbfgs
//
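// data kept for each of the m stored L-BFGS correction pairs:
//   s     = x_{k+1} - x_{k}
//   y     = g_{k+1} - g_{k}
//   ys    = y^T s (i.e. 1/rho)
//   alpha = coefficient cached by the first loop of the two-loop recursion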
struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};
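
// backtracking line search along direction d starting from xp; on success the
// return value is the (positive) number of function evaluations performed, while
// failures are reported through the GGML_LINESEARCH_* codes (checked as ls < 0 by the caller)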
static enum ggml_opt_result linesearch_backtracking(
        struct ggml_context * ctx,
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        const int np,
        struct ggml_tensor * ps[]) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, g, d);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(ctx, gb);

            ggml_opt_get_grad(np, ps, g);

            *fx = ggml_get_f32_1d(f, 0);
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, g, d);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }

                if(dg > -params->lbfgs.wolfe*dginit) {
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    return GGML_LINESEARCH_FAIL;
}

static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_INVALID_WOLFE;
        }
    }

    gf->n_threads = params.n_threads;
    gb->n_threads = params.n_threads;

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    float * x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current parameters
    float * xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous parameters
    float * g  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current gradient
    float * gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous gradient
    float * d  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // search direction

    float * pf = params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||
    float step  = 0.0f;

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    struct ggml_lbfgs_iteration_data * lm = alloca(sizeof(struct ggml_lbfgs_iteration_data)*m);

    for (int i = 0; i < m; ++i) {
        lm[i].alpha = 0.0f;
        lm[i].ys    = 0.0f;
        lm[i].s     = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data;
        lm[i].y     = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data;
    }

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(ctx, gb);

        ggml_opt_get_grad(np, ps, g);

        fx = ggml_get_f32_1d(f, 0);
    }

    if (pf) {
        pf[0] = fx;
    }

    float fx_best = fx;

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_OK;
    }

    // initial step
    ggml_vec_norm_inv_f32(nx, &step, d);

    int j     = 0;
    int k     = 1;
    int ls    = 0;
    int end   = 0;
    int bound = 0;

    int n_no_improvement = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        ls = linesearch_backtracking(ctx, &params, nx, x, &fx, g, d, &step, xp, f, gf, gb, np, ps);

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k) {
                const float rate = (pf[k%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[k%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best) {
                fx_best = fx;
                n_no_improvement = 0;
            } else {
                n_no_improvement++;

                if (n_no_improvement >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < k + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, lm[end].s, x, xp);
        ggml_vec_sub_f32(nx, lm[end].y, g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, lm[end].y, lm[end].s);
        ggml_vec_dot_f32(nx, &yy, lm[end].y, lm[end].y);

        lm[end].ys = ys;

        // find new search direction
        // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS

        bound = (m <= k) ? m : k;
        k++;
        end = (end + 1)%m;
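
        // two-loop recursion: starting from q = -g, the first loop works backwards
        // through the stored (s, y) pairs computing alpha_j = rho_j * s_j^T q and
        // subtracting alpha_j*y_j; after scaling by ys/yy, the second loop adds back
        // (alpha_j - beta_j)*s_j, yielding the new search direction d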
        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j = end;
        for (int i = 0; i < bound; ++i) {
            j = (j + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm[j].alpha, lm[j].s, d);
            lm[j].alpha /= lm[j].ys;
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, lm[j].y, -lm[j].alpha);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, lm[j].y, d);
            beta /= lm[j].ys;
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, lm[j].s, lm[j].alpha - beta);
            j = (j + 1)%m;
        }

        step = 1.0;
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}

struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_ADAM,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .adam = {
                        .n_iter = 10000,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                    },
                };
            } break;
        case GGML_OPT_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_LBFGS,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
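
// typical usage (sketch): build a scalar loss f from ggml ops with the trainable
// tensors marked via ggml_set_param(), then call:
//
//   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
//   enum ggml_opt_result   res        = ggml_opt(ctx, opt_params, f);
//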
enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_OK;

    // build forward + backward compute graphs
    struct ggml_cgraph gf = ggml_build_forward (f);
    struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);

    switch (params.type) {
        case GGML_OPT_ADAM:
            {
                result = ggml_opt_adam(ctx, params, f, &gf, &gb);
            } break;
        case GGML_OPT_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, params, f, &gf, &gb);
            } break;
    }

    if (params.print_forward_graph) {
        ggml_graph_print   (&gf);
        ggml_graph_dump_dot(&gf, NULL, "opt-forward.dot");
    }

    if (params.print_backward_graph) {
        ggml_graph_print   (&gb);
        ggml_graph_dump_dot(&gb, &gf, "opt-backward.dot");
    }

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////
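
// quantization helpers: src holds n floats arranged in rows of k elements, dst
// receives the quantized blocks, hist accumulates a 16-bin histogram of the
// produced 4-bit values, and the return value is the number of bytes written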
size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_0 == 0);
    const int nb = k / QK4_0;

    for (int j = 0; j < n; j += k) {
        block_q4_0 * restrict y = (block_q4_0 *)dst + j/QK4_0;

        quantize_row_q4_0_reference(src + j, y, k);

        for (int i = 0; i < nb; i++) {
            for (int l = 0; l < QK4_0; l += 2) {
                const uint8_t vi0 = y[i].qs[l/2] & 0xF;
                const uint8_t vi1 = y[i].qs[l/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_0*sizeof(block_q4_0));
}

size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_1 == 0);
    const int nb = k / QK4_1;

    for (int j = 0; j < n; j += k) {
        block_q4_1 * restrict y = (block_q4_1 *)dst + j/QK4_1;

        quantize_row_q4_1_reference(src + j, y, k);

        for (int i = 0; i < nb; i++) {
            for (int l = 0; l < QK4_1; l += 2) {
                const uint8_t vi0 = y[i].qs[l/2] & 0xF;
                const uint8_t vi1 = y[i].qs[l/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_1*sizeof(block_q4_1));
}

size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_2 == 0);
    const int nb = k / QK4_2;

    for (int j = 0; j < n; j += k) {
        block_q4_2 * restrict y = (block_q4_2 *)dst + j/QK4_2;

        quantize_row_q4_2_reference(src + j, y, k);

        for (int i = 0; i < nb; i++) {
            for (int l = 0; l < QK4_2; l += 2) {
                const uint8_t vi0 = y[i].qs[l/2] & 0xF;
                const uint8_t vi1 = y[i].qs[l/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_2*sizeof(block_q4_2));
}

////////////////////////////////////////////////////////////////////////////////
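
// the ggml_cpu_has_* functions report compile-time availability of the
// corresponding SIMD / BLAS features: 1 if ggml was built with support, 0 otherwise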
int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}

////////////////////////////////////////////////////////////////////////////////